/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "hash_table_count",			KSTAT_DATA_UINT64 },
	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;			\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;		\
}

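/*
 * Illustrative sketch (not part of the build): DBUF_STAT_MAX() above is a
 * lock-free "store maximum". It rereads the current maximum and retries the
 * compare-and-swap until either the candidate is no longer the larger value
 * or the swap succeeds; atomic_cas_64() returns the old value, so a return
 * other than _m means another thread won the race and we must recheck.
 * The helper name below is hypothetical.
 */
#if 0
static void
example_store_max(volatile uint64_t *maxp, uint64_t v)
{
	uint64_t m;

	while (v > (m = *maxp) && m != atomic_cas_64(maxp, m, v))
		continue;	/* lost a race; reread and retry */
}
#endif
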
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	  should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	  signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	  is unable to keep up with the incoming load and eviction must
 *	  happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. the
 * maximum cache size). Once the eviction thread is woken up and eviction is
 * required, it will continue evicting buffers until it's able to reduce the
 * cache size to the low water mark. If the cache size continues to grow and
 * hits the high water mark, then callers adding elements to the cache will
 * begin to evict directly from the cache until the cache is no longer above
 * the high water mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;

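/*
 * Worked example (illustrative), using the defaults above: with a dbuf
 * cache target of 100 MiB, the eviction thread is signalled once the cache
 * exceeds 100 MiB (the mid water mark) and evicts down to
 * 100 - 10% = 90 MiB, while callers evict synchronously once the cache
 * passes 100 + 10% = 110 MiB.
 */
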
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

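/*
 * Illustrative sketch (not part of the build): how a hash value maps to a
 * bucket and to the mutex guarding it. Both masks are a power of two minus
 * one, so the bitwise AND is a cheap modulo. The helper name is
 * hypothetical.
 */
#if 0
static kmutex_t *
example_bucket_mutex(objset_t *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, lvl, blkid);
	uint64_t idx = hv & h->hash_table_mask;	/* bucket index */

	return (DBUF_HASH_MUTEX(h, idx));	/* one mutex per group of buckets */
}
#endif
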
#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	if (db->db_caching_status != DB_NO_CACHE) {
		/*
		 * This is a cached dbuf, so the size of the user data is
		 * included in its cached amount. We adjust it here because the
		 * user data has already been detached from the dbuf, and the
		 * sync functions are not supposed to touch it (the dbuf might
		 * not exist anymore by the time the sync functions run).
		 */
		uint64_t size = dbu->dbu_size;
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size, size, db);
		if (db->db_caching_status == DB_DBUF_CACHE)
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
	}

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		blkptr_t *bp = db->db_blkptr;
		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

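/*
 * Worked example (illustrative): with 64 sublists, the modulo above keeps
 * only the low six bits of the 64-bit Cityhash value, e.g. a hash ending in
 * 0x2f maps to sublist 47. Because those low-order bits are evenly
 * distributed, so are the resulting sublist indices.
 */
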
/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

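/*
 * Worked example (illustrative): with an ARC target of 4 GiB and the
 * default shifts above, the dbuf cache target is 4 GiB >> 5 = 128 MiB and
 * the metadata cache target is 4 GiB >> 6 = 64 MiB, unless the
 * corresponding *_max_bytes tunable caps it lower.
 */
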
741
742 static inline uint64_t
743 dbuf_cache_hiwater_bytes(void)
744 {
745 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
746 return (dbuf_cache_target +
747 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
748 }
749
750 static inline uint64_t
751 dbuf_cache_lowater_bytes(void)
752 {
753 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
754 return (dbuf_cache_target -
755 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
756 }
757
758 static inline boolean_t
759 dbuf_cache_above_lowater(void)
760 {
761 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
762 dbuf_cache_lowater_bytes());
763 }
764
765 /*
766 * Evict the oldest eligible dbuf from the dbuf cache.
767 */
768 static void
769 dbuf_evict_one(void)
770 {
771 int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
772 multilist_sublist_t *mls = multilist_sublist_lock(
773 &dbuf_caches[DB_DBUF_CACHE].cache, idx);
774
775 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
776
777 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
778 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
779 db = multilist_sublist_prev(mls, db);
780 }
781
782 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
783 multilist_sublist_t *, mls);
784
785 if (db != NULL) {
786 multilist_sublist_remove(mls, db);
787 multilist_sublist_unlock(mls);
788 uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
789 (void) zfs_refcount_remove_many(
790 &dbuf_caches[DB_DBUF_CACHE].size, size, db);
791 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
792 DBUF_STAT_BUMPDOWN(cache_count);
793 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
794 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
795 db->db_caching_status = DB_NO_CACHE;
796 dbuf_destroy(db);
797 DBUF_STAT_BUMP(cache_total_evicts);
798 } else {
799 multilist_sublist_unlock(mls);
800 }
801 }
802
803 /*
804 * The dbuf evict thread is responsible for aging out dbufs from the
805 * cache. Once the cache has reached it's maximum size, dbufs are removed
806 * and destroyed. The eviction thread will continue running until the size
807 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
808 * out of the cache it is destroyed and becomes eligible for arc eviction.
809 */
810 static __attribute__((noreturn)) void
811 dbuf_evict_thread(void *unused)
812 {
813 (void) unused;
814 callb_cpr_t cpr;
815
816 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
817
818 mutex_enter(&dbuf_evict_lock);
819 while (!dbuf_evict_thread_exit) {
820 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
821 CALLB_CPR_SAFE_BEGIN(&cpr);
822 (void) cv_timedwait_idle_hires(&dbuf_evict_cv,
823 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
824 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
825 }
826 mutex_exit(&dbuf_evict_lock);
827
828 /*
829 * Keep evicting as long as we're above the low water mark
830 * for the cache. We do this without holding the locks to
831 * minimize lock contention.
832 */
833 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
834 dbuf_evict_one();
835 }
836
837 mutex_enter(&dbuf_evict_lock);
838 }
839
840 dbuf_evict_thread_exit = B_FALSE;
841 cv_broadcast(&dbuf_evict_cv);
842 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
843 thread_exit();
844 }
845
846 /*
847 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
848 * If the dbuf cache is at its high water mark, then evict a dbuf from the
849 * dbuf cache using the caller's context.
850 */
851 static void
852 dbuf_evict_notify(uint64_t size)
853 {
854 /*
855 * We check if we should evict without holding the dbuf_evict_lock,
856 * because it's OK to occasionally make the wrong decision here,
857 * and grabbing the lock results in massive lock contention.
858 */
859 if (size > dbuf_cache_target_bytes()) {
860 if (size > dbuf_cache_hiwater_bytes())
861 dbuf_evict_one();
862 cv_signal(&dbuf_evict_cv);
863 }
864 }
865
866 static int
867 dbuf_kstat_update(kstat_t *ksp, int rw)
868 {
869 dbuf_stats_t *ds = ksp->ks_data;
870 dbuf_hash_table_t *h = &dbuf_hash_table;
871
872 if (rw == KSTAT_WRITE)
873 return (SET_ERROR(EACCES));
874
875 ds->cache_count.value.ui64 =
876 wmsum_value(&dbuf_sums.cache_count);
877 ds->cache_size_bytes.value.ui64 =
878 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
879 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
880 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
881 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
882 ds->cache_total_evicts.value.ui64 =
883 wmsum_value(&dbuf_sums.cache_total_evicts);
884 for (int i = 0; i < DN_MAX_LEVELS; i++) {
885 ds->cache_levels[i].value.ui64 =
886 wmsum_value(&dbuf_sums.cache_levels[i]);
887 ds->cache_levels_bytes[i].value.ui64 =
888 wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
889 }
890 ds->hash_hits.value.ui64 =
891 wmsum_value(&dbuf_sums.hash_hits);
892 ds->hash_misses.value.ui64 =
893 wmsum_value(&dbuf_sums.hash_misses);
894 ds->hash_collisions.value.ui64 =
895 wmsum_value(&dbuf_sums.hash_collisions);
896 ds->hash_chains.value.ui64 =
897 wmsum_value(&dbuf_sums.hash_chains);
898 ds->hash_insert_race.value.ui64 =
899 wmsum_value(&dbuf_sums.hash_insert_race);
900 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
901 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
902 ds->metadata_cache_count.value.ui64 =
903 wmsum_value(&dbuf_sums.metadata_cache_count);
904 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
905 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
906 ds->metadata_cache_overflow.value.ui64 =
907 wmsum_value(&dbuf_sums.metadata_cache_overflow);
908 return (0);
909 }
910
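/*
 * Note (illustrative): on Linux these statistics are typically exposed as
 * /proc/spl/kstat/zfs/dbufstats, so e.g. hash_hits / (hash_hits +
 * hash_misses) gives the dbuf hash lookup hit rate.
 */
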
void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem / 8 * sizeof (void *) / 8K (1 MiB per 8 GiB of RAM with
	 * 8-byte pointers).
	 */
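	/*
	 * Worked example (illustrative): on an 8 GiB machine with the
	 * default 8K average block size, the loop below doubles hsize while
	 * hsize * 8192 < 1 GiB, stopping at hsize = 2^17 = 131072 buckets,
	 * i.e. a 1 MiB table of 8-byte pointers.
	 */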
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

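/*
 * Illustrative sketch (not part of the build): a loaned buffer is owned by
 * the caller until it is handed back to the DMU, e.g. with
 * dmu_assign_arcbuf_by_dbuf() on a write path, or returned unused.
 */
#if 0
	arc_buf_t *abuf = dbuf_loan_arcbuf(db);
	/* ... consume or fill abuf->b_data ... */
	dmu_return_arcbuf(abuf);	/* hand it back if it was not consumed */
#endif
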
/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

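/*
 * Worked example (illustrative): with 128 KiB data blocks
 * (datablkshift = 17) and 128 KiB indirect blocks (indblkshift = 17, i.e.
 * 1024 block pointers each, since SPA_BLKPTRSHIFT is 7), the level 1
 * block covering offset 1 GiB is dbuf_whichblock(dn, 1, 1ULL << 30) =
 * (1ULL << 30) >> (17 + 1 * (17 - 7)) = 8.
 */
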
/*
 * This function is used to lock the parent of the provided dbuf. This should be
 * used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}

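/*
 * Illustrative sketch (not part of the build): the two functions above are
 * used as a pair, threading the returned lock type through to the unlock
 * so that an indirection change in between is handled safely.
 */
#if 0
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	blkptr_t bp = *db->db_blkptr;	/* db_blkptr is stable while held */
	dmu_buf_unlock_parent(db, dblt, FTAG);
#endif
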
1372 static void
1373 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1374 arc_buf_t *buf, void *vdb)
1375 {
1376 (void) zb, (void) bp;
1377 dmu_buf_impl_t *db = vdb;
1378
1379 mutex_enter(&db->db_mtx);
1380 ASSERT3U(db->db_state, ==, DB_READ);
1381 /*
1382 * All reads are synchronous, so we must have a hold on the dbuf
1383 */
1384 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1385 ASSERT(db->db_buf == NULL);
1386 ASSERT(db->db.db_data == NULL);
1387 if (buf == NULL) {
1388 /* i/o error */
1389 ASSERT(zio == NULL || zio->io_error != 0);
1390 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1391 ASSERT3P(db->db_buf, ==, NULL);
1392 db->db_state = DB_UNCACHED;
1393 DTRACE_SET_STATE(db, "i/o error");
1394 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1395 /* freed in flight */
1396 ASSERT(zio == NULL || zio->io_error == 0);
1397 arc_release(buf, db);
1398 memset(buf->b_data, 0, db->db.db_size);
1399 arc_buf_freeze(buf);
1400 db->db_freed_in_flight = FALSE;
1401 dbuf_set_data(db, buf);
1402 db->db_state = DB_CACHED;
1403 DTRACE_SET_STATE(db, "freed in flight");
1404 } else {
1405 /* success */
1406 ASSERT(zio == NULL || zio->io_error == 0);
1407 dbuf_set_data(db, buf);
1408 db->db_state = DB_CACHED;
1409 DTRACE_SET_STATE(db, "successful read");
1410 }
1411 cv_broadcast(&db->db_changed);
1412 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1413 }
1414
1415 /*
1416 * Shortcut for performing reads on bonus dbufs. Returns
1417 * an error if we fail to verify the dnode associated with
1418 * a decrypted block. Otherwise success.
1419 */
1420 static int
1421 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1422 {
1423 int bonuslen, max_bonuslen, err;
1424
1425 err = dbuf_read_verify_dnode_crypt(db, flags);
1426 if (err)
1427 return (err);
1428
1429 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1430 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1431 ASSERT(MUTEX_HELD(&db->db_mtx));
1432 ASSERT(DB_DNODE_HELD(db));
1433 ASSERT3U(bonuslen, <=, db->db.db_size);
1434 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1435 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1436 if (bonuslen < max_bonuslen)
1437 memset(db->db.db_data, 0, max_bonuslen);
1438 if (bonuslen)
1439 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1440 db->db_state = DB_CACHED;
1441 DTRACE_SET_STATE(db, "bonus buffer filled");
1442 return (0);
1443 }
1444
1445 static void
1446 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1447 {
1448 blkptr_t *bps = db->db.db_data;
1449 uint32_t indbs = 1ULL << dn->dn_indblkshift;
1450 int n_bps = indbs >> SPA_BLKPTRSHIFT;
1451
1452 for (int i = 0; i < n_bps; i++) {
1453 blkptr_t *bp = &bps[i];
1454
1455 ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1456 BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1457 dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1458 BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1459 BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1460 BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0);
1461 }
1462 }
1463
1464 /*
1465 * Handle reads on dbufs that are holes, if necessary. This function
1466 * requires that the dbuf's mutex is held. Returns success (0) if action
1467 * was taken, ENOENT if no action was taken.
1468 */
1469 static int
1470 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1471 {
1472 ASSERT(MUTEX_HELD(&db->db_mtx));
1473
1474 int is_hole = bp == NULL || BP_IS_HOLE(bp);
1475 /*
1476 * For level 0 blocks only, if the above check fails:
1477 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1478 * processes the delete record and clears the bp while we are waiting
1479 * for the dn_mtx (resulting in a "no" from block_freed).
1480 */
1481 if (!is_hole && db->db_level == 0)
1482 is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1483
1484 if (is_hole) {
1485 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1486 memset(db->db.db_data, 0, db->db.db_size);
1487
1488 if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1489 BP_GET_LOGICAL_BIRTH(bp) != 0) {
1490 dbuf_handle_indirect_hole(db, dn, bp);
1491 }
1492 db->db_state = DB_CACHED;
1493 DTRACE_SET_STATE(db, "hole read satisfied");
1494 return (0);
1495 }
1496 return (ENOENT);
1497 }
1498
1499 /*
1500 * This function ensures that, when doing a decrypting read of a block,
1501 * we make sure we have decrypted the dnode associated with it. We must do
1502 * this so that we ensure we are fully authenticating the checksum-of-MACs
1503 * tree from the root of the objset down to this block. Indirect blocks are
1504 * always verified against their secure checksum-of-MACs assuming that the
1505 * dnode containing them is correct. Now that we are doing a decrypting read,
1506 * we can be sure that the key is loaded and verify that assumption. This is
1507 * especially important considering that we always read encrypted dnode
1508 * blocks as raw data (without verifying their MACs) to start, and
1509 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1510 */
1511 static int
1512 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1513 {
1514 int err = 0;
1515 objset_t *os = db->db_objset;
1516 arc_buf_t *dnode_abuf;
1517 dnode_t *dn;
1518 zbookmark_phys_t zb;
1519
1520 ASSERT(MUTEX_HELD(&db->db_mtx));
1521
1522 if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1523 !os->os_encrypted || os->os_raw_receive)
1524 return (0);
1525
1526 DB_DNODE_ENTER(db);
1527 dn = DB_DNODE(db);
1528 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1529
1530 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1531 DB_DNODE_EXIT(db);
1532 return (0);
1533 }
1534
1535 SET_BOOKMARK(&zb, dmu_objset_id(os),
1536 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1537 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1538
1539 /*
1540 * An error code of EACCES tells us that the key is still not
1541 * available. This is ok if we are only reading authenticated
1542 * (and therefore non-encrypted) blocks.
1543 */
1544 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1545 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1546 (db->db_blkid == DMU_BONUS_BLKID &&
1547 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1548 err = 0;
1549
1550 DB_DNODE_EXIT(db);
1551
1552 return (err);
1553 }
1554
1555 /*
1556 * Drops db_mtx and the parent lock specified by dblt and tag before
1557 * returning.
1558 */
1559 static int
1560 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1561 db_lock_type_t dblt, const void *tag)
1562 {
1563 dnode_t *dn;
1564 zbookmark_phys_t zb;
1565 uint32_t aflags = ARC_FLAG_NOWAIT;
1566 int err, zio_flags;
1567 blkptr_t bp, *bpp;
1568
1569 DB_DNODE_ENTER(db);
1570 dn = DB_DNODE(db);
1571 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1572 ASSERT(MUTEX_HELD(&db->db_mtx));
1573 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1574 ASSERT(db->db_buf == NULL);
1575 ASSERT(db->db_parent == NULL ||
1576 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1577
1578 if (db->db_blkid == DMU_BONUS_BLKID) {
1579 err = dbuf_read_bonus(db, dn, flags);
1580 goto early_unlock;
1581 }
1582
1583 if (db->db_state == DB_UNCACHED) {
1584 if (db->db_blkptr == NULL) {
1585 bpp = NULL;
1586 } else {
1587 bp = *db->db_blkptr;
1588 bpp = &bp;
1589 }
1590 } else {
1591 dbuf_dirty_record_t *dr;
1592
1593 ASSERT3S(db->db_state, ==, DB_NOFILL);
1594
1595 /*
1596 * Block cloning: If we have a pending block clone,
1597 * we don't want to read the underlying block, but the content
1598 * of the block being cloned, so we have the most recent data.
1599 */
1600 dr = list_head(&db->db_dirty_records);
1601 if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
1602 err = EIO;
1603 goto early_unlock;
1604 }
1605 bp = dr->dt.dl.dr_overridden_by;
1606 bpp = &bp;
1607 }
1608
1609 err = dbuf_read_hole(db, dn, bpp);
1610 if (err == 0)
1611 goto early_unlock;
1612
1613 ASSERT(bpp != NULL);
1614
1615 /*
1616 * Any attempt to read a redacted block should result in an error. This
1617 * will never happen under normal conditions, but can be useful for
1618 * debugging purposes.
1619 */
1620 if (BP_IS_REDACTED(bpp)) {
1621 ASSERT(dsl_dataset_feature_is_active(
1622 db->db_objset->os_dsl_dataset,
1623 SPA_FEATURE_REDACTED_DATASETS));
1624 err = SET_ERROR(EIO);
1625 goto early_unlock;
1626 }
1627
1628 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1629 db->db.db_object, db->db_level, db->db_blkid);
1630
1631 /*
1632 * All bps of an encrypted os should have the encryption bit set.
1633 * If this is not true it indicates tampering and we report an error.
1634 */
1635 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1636 spa_log_error(db->db_objset->os_spa, &zb,
1637 BP_GET_LOGICAL_BIRTH(bpp));
1638 err = SET_ERROR(EIO);
1639 goto early_unlock;
1640 }
1641
1642 err = dbuf_read_verify_dnode_crypt(db, flags);
1643 if (err != 0)
1644 goto early_unlock;
1645
1646 DB_DNODE_EXIT(db);
1647
1648 db->db_state = DB_READ;
1649 DTRACE_SET_STATE(db, "read issued");
1650 mutex_exit(&db->db_mtx);
1651
1652 if (!DBUF_IS_CACHEABLE(db))
1653 aflags |= ARC_FLAG_UNCACHED;
1654 else if (dbuf_is_l2cacheable(db))
1655 aflags |= ARC_FLAG_L2CACHE;
1656
1657 dbuf_add_ref(db, NULL);
1658
1659 zio_flags = (flags & DB_RF_CANFAIL) ?
1660 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1661
1662 if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1663 zio_flags |= ZIO_FLAG_RAW;
1664 /*
1665 * The zio layer will copy the provided blkptr later, but we have our
1666 * own copy so that we can release the parent's rwlock. We have to
1667 * do that so that if dbuf_read_done is called synchronously (on
1668 * an l1 cache hit) we don't acquire the db_mtx while holding the
1669 * parent's rwlock, which would be a lock ordering violation.
1670 */
1671 dmu_buf_unlock_parent(db, dblt, tag);
1672 (void) arc_read(zio, db->db_objset->os_spa, bpp,
1673 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1674 &aflags, &zb);
1675 return (err);
1676 early_unlock:
1677 DB_DNODE_EXIT(db);
1678 mutex_exit(&db->db_mtx);
1679 dmu_buf_unlock_parent(db, dblt, tag);
1680 return (err);
1681 }
1682
1683 /*
1684 * This is our just-in-time copy function. It makes a copy of buffers that
1685 * have been modified in a previous transaction group before we access them in
1686 * the current active group.
1687 *
1688 * This function is used in three places: when we are dirtying a buffer for the
1689 * first time in a txg, when we are freeing a range in a dnode that includes
1690 * this buffer, and when we are accessing a buffer which was received compressed
1691 * and later referenced in a WRITE_BYREF record.
1692 *
1693 * Note that when we are called from dbuf_free_range() we do not put a hold on
1694 * the buffer, we just traverse the active dbuf list for the dnode.
1695 */
1696 static void
1697 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1698 {
1699 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1700
1701 ASSERT(MUTEX_HELD(&db->db_mtx));
1702 ASSERT(db->db.db_data != NULL);
1703 ASSERT(db->db_level == 0);
1704 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1705
1706 if (dr == NULL ||
1707 (dr->dt.dl.dr_data !=
1708 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1709 return;
1710
1711 /*
1712 * If the last dirty record for this dbuf has not yet synced
1713 * and its referencing the dbuf data, either:
1714 * reset the reference to point to a new copy,
1715 * or (if there a no active holders)
1716 * just null out the current db_data pointer.
1717 */
1718 ASSERT3U(dr->dr_txg, >=, txg - 2);
1719 if (db->db_blkid == DMU_BONUS_BLKID) {
1720 dnode_t *dn = DB_DNODE(db);
1721 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1722 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1723 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1724 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1725 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1726 dnode_t *dn = DB_DNODE(db);
1727 int size = arc_buf_size(db->db_buf);
1728 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1729 spa_t *spa = db->db_objset->os_spa;
1730 enum zio_compress compress_type =
1731 arc_get_compression(db->db_buf);
1732 uint8_t complevel = arc_get_complevel(db->db_buf);
1733
1734 if (arc_is_encrypted(db->db_buf)) {
1735 boolean_t byteorder;
1736 uint8_t salt[ZIO_DATA_SALT_LEN];
1737 uint8_t iv[ZIO_DATA_IV_LEN];
1738 uint8_t mac[ZIO_DATA_MAC_LEN];
1739
1740 arc_get_raw_params(db->db_buf, &byteorder, salt,
1741 iv, mac);
1742 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1743 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1744 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1745 compress_type, complevel);
1746 } else if (compress_type != ZIO_COMPRESS_OFF) {
1747 ASSERT3U(type, ==, ARC_BUFC_DATA);
1748 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1749 size, arc_buf_lsize(db->db_buf), compress_type,
1750 complevel);
1751 } else {
1752 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1753 }
1754 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1755 } else {
1756 db->db_buf = NULL;
1757 dbuf_clear_data(db);
1758 }
1759 }
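/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] The copy
 * decision above boils down to a hold-count comparison: the old data is
 * copied only while someone other than the per-txg dirty holds still
 * references it. The helper name below is hypothetical.
 */
#if 0
static boolean_t
example_must_copy_old_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	/* Holds beyond db_dirtycnt mean active users of the data exist. */
	return (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt);
}
#endif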
1760
1761 int
1762 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1763 {
1764 int err = 0;
1765 boolean_t prefetch;
1766 dnode_t *dn;
1767
1768 /*
1769 * We don't have to hold the mutex to check db_state because it
1770 * can't be freed while we have a hold on the buffer.
1771 */
1772 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1773
1774 DB_DNODE_ENTER(db);
1775 dn = DB_DNODE(db);
1776
1777 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1778 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1779
1780 mutex_enter(&db->db_mtx);
1781 if (flags & DB_RF_PARTIAL_FIRST)
1782 db->db_partial_read = B_TRUE;
1783 else if (!(flags & DB_RF_PARTIAL_MORE))
1784 db->db_partial_read = B_FALSE;
1785 if (db->db_state == DB_CACHED) {
1786 /*
1787 * Ensure that this block's dnode has been decrypted if
1788 * the caller has requested decrypted data.
1789 */
1790 err = dbuf_read_verify_dnode_crypt(db, flags);
1791
1792 /*
1793 * If the arc buf is compressed or encrypted and the caller
1794 * requested uncompressed data, we need to untransform it
1795 * before returning. We also call arc_untransform() on any
1796 * unauthenticated blocks, which will verify their MAC if
1797 * the key is now available.
1798 */
1799 if (err == 0 && db->db_buf != NULL &&
1800 (flags & DB_RF_NO_DECRYPT) == 0 &&
1801 (arc_is_encrypted(db->db_buf) ||
1802 arc_is_unauthenticated(db->db_buf) ||
1803 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1804 spa_t *spa = dn->dn_objset->os_spa;
1805 zbookmark_phys_t zb;
1806
1807 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1808 db->db.db_object, db->db_level, db->db_blkid);
1809 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1810 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1811 dbuf_set_data(db, db->db_buf);
1812 }
1813 mutex_exit(&db->db_mtx);
1814 if (err == 0 && prefetch) {
1815 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1816 B_FALSE, flags & DB_RF_HAVESTRUCT);
1817 }
1818 DB_DNODE_EXIT(db);
1819 DBUF_STAT_BUMP(hash_hits);
1820 } else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1821 boolean_t need_wait = B_FALSE;
1822
1823 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1824
1825 if (zio == NULL && (db->db_state == DB_NOFILL ||
1826 (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1827 spa_t *spa = dn->dn_objset->os_spa;
1828 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1829 need_wait = B_TRUE;
1830 }
1831 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1832 /*
1833 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1834 * for us
1835 */
1836 if (!err && prefetch) {
1837 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1838 db->db_state != DB_CACHED,
1839 flags & DB_RF_HAVESTRUCT);
1840 }
1841
1842 DB_DNODE_EXIT(db);
1843 DBUF_STAT_BUMP(hash_misses);
1844
1845 /*
1846 * If we created a zio_root we must execute it to avoid
1847 * leaking it, even if it isn't attached to any work due
1848 * to an error in dbuf_read_impl().
1849 */
1850 if (need_wait) {
1851 if (err == 0)
1852 err = zio_wait(zio);
1853 else
1854 VERIFY0(zio_wait(zio));
1855 }
1856 } else {
1857 /*
1858 * Another reader came in while the dbuf was in flight
1859 * between UNCACHED and CACHED. Either a writer will finish
1860 * writing the buffer (sending the dbuf to CACHED) or the
1861 * first reader's request will reach the read_done callback
1862 * and send the dbuf to CACHED. Otherwise, a failure
1863 * occurred and the dbuf went to UNCACHED.
1864 */
1865 mutex_exit(&db->db_mtx);
1866 if (prefetch) {
1867 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1868 B_TRUE, flags & DB_RF_HAVESTRUCT);
1869 }
1870 DB_DNODE_EXIT(db);
1871 DBUF_STAT_BUMP(hash_misses);
1872
1873 /* Skip the wait per the caller's request. */
1874 if ((flags & DB_RF_NEVERWAIT) == 0) {
1875 mutex_enter(&db->db_mtx);
1876 while (db->db_state == DB_READ ||
1877 db->db_state == DB_FILL) {
1878 ASSERT(db->db_state == DB_READ ||
1879 (flags & DB_RF_HAVESTRUCT) == 0);
1880 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1881 db, zio_t *, zio);
1882 cv_wait(&db->db_changed, &db->db_mtx);
1883 }
1884 if (db->db_state == DB_UNCACHED)
1885 err = SET_ERROR(EIO);
1886 mutex_exit(&db->db_mtx);
1887 }
1888 }
1889
1890 return (err);
1891 }
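/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] A minimal
 * caller of dbuf_read() that tolerates I/O errors and suppresses
 * prefetch; the function name is hypothetical and the caller is assumed
 * to already hold the dbuf.
 */
#if 0
static int
example_read_block(dmu_buf_impl_t *db)
{
	/* DB_RF_CANFAIL returns EIO to the caller instead of panicking. */
	int err = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	if (err == 0) {
		/* The dbuf is now DB_CACHED and db->db.db_data is valid. */
	}
	return (err);
}
#endif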
1892
1893 static void
1894 dbuf_noread(dmu_buf_impl_t *db)
1895 {
1896 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1897 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1898 mutex_enter(&db->db_mtx);
1899 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1900 cv_wait(&db->db_changed, &db->db_mtx);
1901 if (db->db_state == DB_UNCACHED) {
1902 ASSERT(db->db_buf == NULL);
1903 ASSERT(db->db.db_data == NULL);
1904 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1905 db->db_state = DB_FILL;
1906 DTRACE_SET_STATE(db, "assigning filled buffer");
1907 } else if (db->db_state == DB_NOFILL) {
1908 dbuf_clear_data(db);
1909 } else {
1910 ASSERT3U(db->db_state, ==, DB_CACHED);
1911 }
1912 mutex_exit(&db->db_mtx);
1913 }
1914
1915 void
1916 dbuf_unoverride(dbuf_dirty_record_t *dr)
1917 {
1918 dmu_buf_impl_t *db = dr->dr_dbuf;
1919 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1920 uint64_t txg = dr->dr_txg;
1921
1922 ASSERT(MUTEX_HELD(&db->db_mtx));
1923 /*
1924 * This assert is valid because dmu_sync() expects to be called by
1925 * a zilog's get_data while holding a range lock. This call only
1926 * comes from dbuf_dirty() callers who must also hold a range lock.
1927 */
1928 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1929 ASSERT(db->db_level == 0);
1930
1931 if (db->db_blkid == DMU_BONUS_BLKID ||
1932 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1933 return;
1934
1935 ASSERT(db->db_data_pending != dr);
1936
1937 /* free this block */
1938 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1939 zio_free(db->db_objset->os_spa, txg, bp);
1940
1941 if (dr->dt.dl.dr_brtwrite) {
1942 ASSERT0P(dr->dt.dl.dr_data);
1943 dr->dt.dl.dr_data = db->db_buf;
1944 }
1945 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1946 dr->dt.dl.dr_nopwrite = B_FALSE;
1947 dr->dt.dl.dr_brtwrite = B_FALSE;
1948 dr->dt.dl.dr_has_raw_params = B_FALSE;
1949
1950 /*
1951 * Release the already-written buffer, so we leave it in
1952 * a consistent dirty state. Note that all callers are
1953 * modifying the buffer, so they will immediately do
1954 * another (redundant) arc_release(). Therefore, leave
1955 * the buf thawed to save the effort of freezing &
1956 * immediately re-thawing it.
1957 */
1958 if (dr->dt.dl.dr_data)
1959 arc_release(dr->dt.dl.dr_data, db);
1960 }
1961
1962 /*
1963 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1964 * data blocks in the free range, so that any future readers will find
1965 * empty blocks.
1966 */
1967 void
1968 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1969 dmu_tx_t *tx)
1970 {
1971 dmu_buf_impl_t *db_search;
1972 dmu_buf_impl_t *db, *db_next;
1973 uint64_t txg = tx->tx_txg;
1974 avl_index_t where;
1975 dbuf_dirty_record_t *dr;
1976
1977 if (end_blkid > dn->dn_maxblkid &&
1978 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1979 end_blkid = dn->dn_maxblkid;
1980 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1981 (u_longlong_t)end_blkid);
1982
1983 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1984 db_search->db_level = 0;
1985 db_search->db_blkid = start_blkid;
1986 db_search->db_state = DB_SEARCH;
1987
1988 mutex_enter(&dn->dn_dbufs_mtx);
1989 db = avl_find(&dn->dn_dbufs, db_search, &where);
1990 ASSERT3P(db, ==, NULL);
1991
1992 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1993
1994 for (; db != NULL; db = db_next) {
1995 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1996 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1997
1998 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1999 break;
2000 }
2001 ASSERT3U(db->db_blkid, >=, start_blkid);
2002
2003 /* found a level 0 buffer in the range */
2004 mutex_enter(&db->db_mtx);
2005 if (dbuf_undirty(db, tx)) {
2006 /* mutex has been dropped and dbuf destroyed */
2007 continue;
2008 }
2009
2010 if (db->db_state == DB_UNCACHED ||
2011 db->db_state == DB_NOFILL ||
2012 db->db_state == DB_EVICTING) {
2013 ASSERT(db->db.db_data == NULL);
2014 mutex_exit(&db->db_mtx);
2015 continue;
2016 }
2017 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2018 /* will be handled in dbuf_read_done or dbuf_rele */
2019 db->db_freed_in_flight = TRUE;
2020 mutex_exit(&db->db_mtx);
2021 continue;
2022 }
2023 if (zfs_refcount_count(&db->db_holds) == 0) {
2024 ASSERT(db->db_buf);
2025 dbuf_destroy(db);
2026 continue;
2027 }
2028 /* The dbuf is referenced */
2029
2030 dr = list_head(&db->db_dirty_records);
2031 if (dr != NULL) {
2032 if (dr->dr_txg == txg) {
2033 /*
2034 * This buffer is "in-use", re-adjust the file
2035 * size to reflect that this buffer may
2036 * contain new data when we sync.
2037 */
2038 if (db->db_blkid != DMU_SPILL_BLKID &&
2039 db->db_blkid > dn->dn_maxblkid)
2040 dn->dn_maxblkid = db->db_blkid;
2041 dbuf_unoverride(dr);
2042 } else {
2043 /*
2044 * This dbuf is not dirty in the open context.
2045 * Either uncache it (if it's not referenced in
2046 * the open context) or reset its contents to
2047 * empty.
2048 */
2049 dbuf_fix_old_data(db, txg);
2050 }
2051 }
2052 /* clear the contents if it's cached */
2053 if (db->db_state == DB_CACHED) {
2054 ASSERT(db->db.db_data != NULL);
2055 arc_release(db->db_buf, db);
2056 rw_enter(&db->db_rwlock, RW_WRITER);
2057 memset(db->db.db_data, 0, db->db.db_size);
2058 rw_exit(&db->db_rwlock);
2059 arc_buf_freeze(db->db_buf);
2060 }
2061
2062 mutex_exit(&db->db_mtx);
2063 }
2064
2065 mutex_exit(&dn->dn_dbufs_mtx);
2066 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2067 }
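/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] A caller
 * (e.g. a dnode free path) clearing every cached level-0 block of a
 * dnode so that future readers see holes; names are hypothetical.
 */
#if 0
static void
example_free_all_blocks(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, 0, dn->dn_maxblkid, tx);
}
#endif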
2068
2069 void
2070 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2071 {
2072 arc_buf_t *buf, *old_buf;
2073 dbuf_dirty_record_t *dr;
2074 int osize = db->db.db_size;
2075 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2076 dnode_t *dn;
2077
2078 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2079
2080 DB_DNODE_ENTER(db);
2081 dn = DB_DNODE(db);
2082
2083 /*
2084 * XXX we should be doing a dbuf_read, checking the return
2085 * value and returning that up to our callers
2086 */
2087 dmu_buf_will_dirty(&db->db, tx);
2088
2089 /* create the data buffer for the new block */
2090 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2091
2092 /* copy old block data to the new block */
2093 old_buf = db->db_buf;
2094 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2095 /* zero the remainder */
2096 if (size > osize)
2097 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2098
2099 mutex_enter(&db->db_mtx);
2100 dbuf_set_data(db, buf);
2101 arc_buf_destroy(old_buf, db);
2102 db->db.db_size = size;
2103
2104 dr = list_head(&db->db_dirty_records);
2105 /* dirty record added by dmu_buf_will_dirty() */
2106 VERIFY(dr != NULL);
2107 if (db->db_level == 0)
2108 dr->dt.dl.dr_data = buf;
2109 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2110 ASSERT3U(dr->dr_accounted, ==, osize);
2111 dr->dr_accounted = size;
2112 mutex_exit(&db->db_mtx);
2113
2114 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2115 DB_DNODE_EXIT(db);
2116 }
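/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] Resizing
 * the tail block of an object before extending it; per the code above,
 * old contents are preserved and any added tail is zero-filled.
 */
#if 0
static void
example_grow_tail_block(dmu_buf_impl_t *db, int new_size, dmu_tx_t *tx)
{
	/* dbuf_new_size() also dirties the dbuf in tx's txg. */
	dbuf_new_size(db, new_size, tx);
}
#endif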
2117
2118 void
2119 dbuf_release_bp(dmu_buf_impl_t *db)
2120 {
2121 objset_t *os __maybe_unused = db->db_objset;
2122
2123 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2124 ASSERT(arc_released(os->os_phys_buf) ||
2125 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2126 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2127
2128 (void) arc_release(db->db_buf, db);
2129 }
2130
2131 /*
2132 * We already have a dirty record for this TXG, and we are being
2133 * dirtied again.
2134 */
2135 static void
2136 dbuf_redirty(dbuf_dirty_record_t *dr)
2137 {
2138 dmu_buf_impl_t *db = dr->dr_dbuf;
2139
2140 ASSERT(MUTEX_HELD(&db->db_mtx));
2141
2142 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2143 /*
2144 * If this buffer has already been written out,
2145 * we now need to reset its state.
2146 */
2147 dbuf_unoverride(dr);
2148 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2149 db->db_state != DB_NOFILL) {
2150 /* Already released on initial dirty, so just thaw. */
2151 ASSERT(arc_released(db->db_buf));
2152 arc_buf_thaw(db->db_buf);
2153 }
2154 }
2155 }
2156
2157 dbuf_dirty_record_t *
2158 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2159 {
2160 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2161 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2162 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2163 ASSERT(dn->dn_maxblkid >= blkid);
2164
2165 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2166 list_link_init(&dr->dr_dirty_node);
2167 list_link_init(&dr->dr_dbuf_node);
2168 dr->dr_dnode = dn;
2169 dr->dr_txg = tx->tx_txg;
2170 dr->dt.dll.dr_blkid = blkid;
2171 dr->dr_accounted = dn->dn_datablksz;
2172
2173 /*
2174 * There should not be any dbuf for the block that we're dirtying.
2175 * Otherwise the buffer contents could be inconsistent between the
2176 * dbuf and the lightweight dirty record.
2177 */
2178 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2179 NULL));
2180
2181 mutex_enter(&dn->dn_mtx);
2182 int txgoff = tx->tx_txg & TXG_MASK;
2183 if (dn->dn_free_ranges[txgoff] != NULL) {
2184 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2185 }
2186
2187 if (dn->dn_nlevels == 1) {
2188 ASSERT3U(blkid, <, dn->dn_nblkptr);
2189 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2190 mutex_exit(&dn->dn_mtx);
2191 rw_exit(&dn->dn_struct_rwlock);
2192 dnode_setdirty(dn, tx);
2193 } else {
2194 mutex_exit(&dn->dn_mtx);
2195
2196 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2197 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2198 1, blkid >> epbs, FTAG);
2199 rw_exit(&dn->dn_struct_rwlock);
2200 if (parent_db == NULL) {
2201 kmem_free(dr, sizeof (*dr));
2202 return (NULL);
2203 }
2204 int err = dbuf_read(parent_db, NULL,
2205 (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2206 if (err != 0) {
2207 dbuf_rele(parent_db, FTAG);
2208 kmem_free(dr, sizeof (*dr));
2209 return (NULL);
2210 }
2211
2212 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2213 dbuf_rele(parent_db, FTAG);
2214 mutex_enter(&parent_dr->dt.di.dr_mtx);
2215 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2216 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2217 mutex_exit(&parent_dr->dt.di.dr_mtx);
2218 dr->dr_parent = parent_dr;
2219 }
2220
2221 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2222
2223 return (dr);
2224 }
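/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] Using the
 * lightweight dirty path, which records a dirty block without
 * instantiating a dbuf. A NULL return means the parent indirect could
 * not be held or read; callers are expected to fall back to the regular
 * dbuf_dirty() path in that case.
 */
#if 0
static int
example_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr = dbuf_dirty_lightweight(dn, blkid, tx);
	if (dr == NULL)
		return (SET_ERROR(EIO));	/* fall back to a full dbuf */
	/* The caller then attaches its data to dr before syncing. */
	return (0);
}
#endif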
2225
2226 dbuf_dirty_record_t *
2227 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2228 {
2229 dnode_t *dn;
2230 objset_t *os;
2231 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2232 int txgoff = tx->tx_txg & TXG_MASK;
2233 boolean_t drop_struct_rwlock = B_FALSE;
2234
2235 ASSERT(tx->tx_txg != 0);
2236 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2237 DMU_TX_DIRTY_BUF(tx, db);
2238
2239 DB_DNODE_ENTER(db);
2240 dn = DB_DNODE(db);
2241 /*
2242 * Shouldn't dirty a regular buffer in syncing context. Private
2243 * objects may be dirtied in syncing context, but only if they
2244 * were already pre-dirtied in open context.
2245 */
2246 #ifdef ZFS_DEBUG
2247 if (dn->dn_objset->os_dsl_dataset != NULL) {
2248 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2249 RW_READER, FTAG);
2250 }
2251 ASSERT(!dmu_tx_is_syncing(tx) ||
2252 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2253 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2254 dn->dn_objset->os_dsl_dataset == NULL);
2255 if (dn->dn_objset->os_dsl_dataset != NULL)
2256 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2257 #endif
2258 /*
2259 * We make this assert for private objects as well, but after we
2260 * check if we're already dirty. They are allowed to re-dirty
2261 * in syncing context.
2262 */
2263 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2264 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2265 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2266
2267 mutex_enter(&db->db_mtx);
2268 /*
2269 * XXX make this true for indirects too? The problem is that
2270 * transactions created with dmu_tx_create_assigned() from
2271 * syncing context don't bother holding ahead.
2272 */
2273 ASSERT(db->db_level != 0 ||
2274 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2275 db->db_state == DB_NOFILL);
2276
2277 mutex_enter(&dn->dn_mtx);
2278 dnode_set_dirtyctx(dn, tx, db);
2279 if (tx->tx_txg > dn->dn_dirty_txg)
2280 dn->dn_dirty_txg = tx->tx_txg;
2281 mutex_exit(&dn->dn_mtx);
2282
2283 if (db->db_blkid == DMU_SPILL_BLKID)
2284 dn->dn_have_spill = B_TRUE;
2285
2286 /*
2287 * If this buffer is already dirty, we're done.
2288 */
2289 dr_head = list_head(&db->db_dirty_records);
2290 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2291 db->db.db_object == DMU_META_DNODE_OBJECT);
2292 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2293 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2294 DB_DNODE_EXIT(db);
2295
2296 dbuf_redirty(dr_next);
2297 mutex_exit(&db->db_mtx);
2298 return (dr_next);
2299 }
2300
2301 /*
2302 * Only valid if not already dirty.
2303 */
2304 ASSERT(dn->dn_object == 0 ||
2305 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2306 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2307
2308 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2309
2310 /*
2311 * We should only be dirtying in syncing context if it's the
2312 * mos or we're initializing the os or it's a special object.
2313 * However, we are allowed to dirty in syncing context provided
2314 * we already dirtied it in open context. Hence we must make
2315 * this assertion only if we're not already dirty.
2316 */
2317 os = dn->dn_objset;
2318 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2319 #ifdef ZFS_DEBUG
2320 if (dn->dn_objset->os_dsl_dataset != NULL)
2321 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2322 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2323 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2324 if (dn->dn_objset->os_dsl_dataset != NULL)
2325 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2326 #endif
2327 ASSERT(db->db.db_size != 0);
2328
2329 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2330
2331 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2332 dmu_objset_willuse_space(os, db->db.db_size, tx);
2333 }
2334
2335 /*
2336 * If this buffer is dirty in an old transaction group we need
2337 * to make a copy of it so that the changes we make in this
2338 * transaction group won't leak out when we sync the older txg.
2339 */
2340 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2341 list_link_init(&dr->dr_dirty_node);
2342 list_link_init(&dr->dr_dbuf_node);
2343 dr->dr_dnode = dn;
2344 if (db->db_level == 0) {
2345 void *data_old = db->db_buf;
2346
2347 if (db->db_state != DB_NOFILL) {
2348 if (db->db_blkid == DMU_BONUS_BLKID) {
2349 dbuf_fix_old_data(db, tx->tx_txg);
2350 data_old = db->db.db_data;
2351 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2352 /*
2353 * Release the data buffer from the cache so
2354 * that we can modify it without impacting
2355 * possible other users of this cached data
2356 * block. Note that indirect blocks and
2357 * private objects are not released until the
2358 * syncing state (since they are only modified
2359 * then).
2360 */
2361 arc_release(db->db_buf, db);
2362 dbuf_fix_old_data(db, tx->tx_txg);
2363 data_old = db->db_buf;
2364 }
2365 ASSERT(data_old != NULL);
2366 }
2367 dr->dt.dl.dr_data = data_old;
2368 } else {
2369 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2370 list_create(&dr->dt.di.dr_children,
2371 sizeof (dbuf_dirty_record_t),
2372 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2373 }
2374 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2375 dr->dr_accounted = db->db.db_size;
2376 }
2377 dr->dr_dbuf = db;
2378 dr->dr_txg = tx->tx_txg;
2379 list_insert_before(&db->db_dirty_records, dr_next, dr);
2380
2381 /*
2382 * We could have been freed_in_flight between the dbuf_noread
2383 * and dbuf_dirty. We win, as though the dbuf_noread() had
2384 * happened after the free.
2385 */
2386 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2387 db->db_blkid != DMU_SPILL_BLKID) {
2388 mutex_enter(&dn->dn_mtx);
2389 if (dn->dn_free_ranges[txgoff] != NULL) {
2390 range_tree_clear(dn->dn_free_ranges[txgoff],
2391 db->db_blkid, 1);
2392 }
2393 mutex_exit(&dn->dn_mtx);
2394 db->db_freed_in_flight = FALSE;
2395 }
2396
2397 /*
2398 * This buffer is now part of this txg
2399 */
2400 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2401 db->db_dirtycnt += 1;
2402 ASSERT3U(db->db_dirtycnt, <=, 3);
2403
2404 mutex_exit(&db->db_mtx);
2405
2406 if (db->db_blkid == DMU_BONUS_BLKID ||
2407 db->db_blkid == DMU_SPILL_BLKID) {
2408 mutex_enter(&dn->dn_mtx);
2409 ASSERT(!list_link_active(&dr->dr_dirty_node));
2410 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2411 mutex_exit(&dn->dn_mtx);
2412 dnode_setdirty(dn, tx);
2413 DB_DNODE_EXIT(db);
2414 return (dr);
2415 }
2416
2417 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2418 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2419 drop_struct_rwlock = B_TRUE;
2420 }
2421
2422 /*
2423 * If we are overwriting a dedup BP, then unless it is snapshotted,
2424 * when we get to syncing context we will need to decrement its
2425 * refcount in the DDT. Prefetch the relevant DDT block so that
2426 * syncing context won't have to wait for the i/o.
2427 */
2428 if (db->db_blkptr != NULL) {
2429 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2430 ddt_prefetch(os->os_spa, db->db_blkptr);
2431 dmu_buf_unlock_parent(db, dblt, FTAG);
2432 }
2433
2434 /*
2435 * We need to hold the dn_struct_rwlock to make this assertion,
2436 * because it protects dn_phys / dn_next_nlevels from changing.
2437 */
2438 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2439 dn->dn_phys->dn_nlevels > db->db_level ||
2440 dn->dn_next_nlevels[txgoff] > db->db_level ||
2441 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2442 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2443
2444
2445 if (db->db_level == 0) {
2446 ASSERT(!db->db_objset->os_raw_receive ||
2447 dn->dn_maxblkid >= db->db_blkid);
2448 dnode_new_blkid(dn, db->db_blkid, tx,
2449 drop_struct_rwlock, B_FALSE);
2450 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2451 }
2452
2453 if (db->db_level+1 < dn->dn_nlevels) {
2454 dmu_buf_impl_t *parent = db->db_parent;
2455 dbuf_dirty_record_t *di;
2456 int parent_held = FALSE;
2457
2458 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2459 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2460 parent = dbuf_hold_level(dn, db->db_level + 1,
2461 db->db_blkid >> epbs, FTAG);
2462 ASSERT(parent != NULL);
2463 parent_held = TRUE;
2464 }
2465 if (drop_struct_rwlock)
2466 rw_exit(&dn->dn_struct_rwlock);
2467 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2468 di = dbuf_dirty(parent, tx);
2469 if (parent_held)
2470 dbuf_rele(parent, FTAG);
2471
2472 mutex_enter(&db->db_mtx);
2473 /*
2474 * Since we've dropped the mutex, it's possible that
2475 * dbuf_undirty() might have changed this out from under us.
2476 */
2477 if (list_head(&db->db_dirty_records) == dr ||
2478 dn->dn_object == DMU_META_DNODE_OBJECT) {
2479 mutex_enter(&di->dt.di.dr_mtx);
2480 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2481 ASSERT(!list_link_active(&dr->dr_dirty_node));
2482 list_insert_tail(&di->dt.di.dr_children, dr);
2483 mutex_exit(&di->dt.di.dr_mtx);
2484 dr->dr_parent = di;
2485 }
2486 mutex_exit(&db->db_mtx);
2487 } else {
2488 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2489 ASSERT(db->db_blkid < dn->dn_nblkptr);
2490 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2491 mutex_enter(&dn->dn_mtx);
2492 ASSERT(!list_link_active(&dr->dr_dirty_node));
2493 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2494 mutex_exit(&dn->dn_mtx);
2495 if (drop_struct_rwlock)
2496 rw_exit(&dn->dn_struct_rwlock);
2497 }
2498
2499 dnode_setdirty(dn, tx);
2500 DB_DNODE_EXIT(db);
2501 return (dr);
2502 }
2503
2504 static void
2505 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2506 {
2507 dmu_buf_impl_t *db = dr->dr_dbuf;
2508
2509 if (dr->dt.dl.dr_data != db->db.db_data) {
2510 struct dnode *dn = dr->dr_dnode;
2511 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2512
2513 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2514 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2515 }
2516 db->db_data_pending = NULL;
2517 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2518 list_remove(&db->db_dirty_records, dr);
2519 if (dr->dr_dbuf->db_level != 0) {
2520 mutex_destroy(&dr->dt.di.dr_mtx);
2521 list_destroy(&dr->dt.di.dr_children);
2522 }
2523 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2524 ASSERT3U(db->db_dirtycnt, >, 0);
2525 db->db_dirtycnt -= 1;
2526 }
2527
2528 /*
2529 * Undirty a buffer in the transaction group referenced by the given
2530 * transaction. Return whether this evicted the dbuf.
2531 */
2532 boolean_t
2533 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2534 {
2535 uint64_t txg = tx->tx_txg;
2536 boolean_t brtwrite;
2537
2538 ASSERT(txg != 0);
2539
2540 /*
2541 * Due to our use of dn_nlevels below, this can only be called
2542 * in open context, unless we are operating on the MOS.
2543 * From syncing context, dn_nlevels may be different from the
2544 * dn_nlevels used when dbuf was dirtied.
2545 */
2546 ASSERT(db->db_objset ==
2547 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2548 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2549 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2550 ASSERT0(db->db_level);
2551 ASSERT(MUTEX_HELD(&db->db_mtx));
2552
2553 /*
2554 * If this buffer is not dirty, we're done.
2555 */
2556 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2557 if (dr == NULL)
2558 return (B_FALSE);
2559 ASSERT(dr->dr_dbuf == db);
2560
2561 brtwrite = dr->dt.dl.dr_brtwrite;
2562 if (brtwrite) {
2563 /*
2564 * We are freeing a block that we cloned in the same
2565 * transaction group.
2566 */
2567 brt_pending_remove(dmu_objset_spa(db->db_objset),
2568 &dr->dt.dl.dr_overridden_by, tx);
2569 }
2570
2571 dnode_t *dn = dr->dr_dnode;
2572
2573 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2574
2575 ASSERT(db->db.db_size != 0);
2576
2577 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2578 dr->dr_accounted, txg);
2579
2580 list_remove(&db->db_dirty_records, dr);
2581
2582 /*
2583 * Note that there are three places in dbuf_dirty()
2584 * where this dirty record may be put on a list.
2585 * Make sure to do a list_remove corresponding to
2586 * every one of those list_insert calls.
2587 */
2588 if (dr->dr_parent) {
2589 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2590 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2591 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2592 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2593 db->db_level + 1 == dn->dn_nlevels) {
2594 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2595 mutex_enter(&dn->dn_mtx);
2596 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2597 mutex_exit(&dn->dn_mtx);
2598 }
2599
2600 if (db->db_state != DB_NOFILL && !brtwrite) {
2601 dbuf_unoverride(dr);
2602
2603 ASSERT(db->db_buf != NULL);
2604 ASSERT(dr->dt.dl.dr_data != NULL);
2605 if (dr->dt.dl.dr_data != db->db_buf)
2606 arc_buf_destroy(dr->dt.dl.dr_data, db);
2607 }
2608
2609 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2610
2611 ASSERT(db->db_dirtycnt > 0);
2612 db->db_dirtycnt -= 1;
2613
2614 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2615 ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2616 arc_released(db->db_buf));
2617 dbuf_destroy(db);
2618 return (B_TRUE);
2619 }
2620
2621 return (B_FALSE);
2622 }
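/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] The
 * return-value protocol callers must follow (compare the loop in
 * dbuf_free_range() above): B_TRUE means the dbuf was destroyed and
 * db_mtx has already been dropped.
 */
#if 0
static void
example_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	if (dbuf_undirty(db, tx)) {
		/* The dbuf is gone and its mutex was dropped. */
		return;
	}
	mutex_exit(&db->db_mtx);
}
#endif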
2623
2624 static void
2625 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2626 {
2627 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2628 boolean_t undirty = B_FALSE;
2629
2630 ASSERT(tx->tx_txg != 0);
2631 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2632
2633 /*
2634 * Quick check for dirtiness. For already dirty blocks, this
2635 * reduces runtime of this function by >90%, and overall performance
2636 * by 50% for some workloads (e.g. file deletion with indirect blocks
2637 * cached).
2638 */
2639 mutex_enter(&db->db_mtx);
2640
2641 if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2642 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2643 /*
2644 * It's possible that it is already dirty but not cached,
2645 * because there are some calls to dbuf_dirty() that don't
2646 * go through dmu_buf_will_dirty().
2647 */
2648 if (dr != NULL) {
2649 if (dr->dt.dl.dr_brtwrite) {
2650 /*
2651 * Block cloning: If we are dirtying a cloned
2652 * block, we cannot simply redirty it, because
2653 * this dr has no data associated with it.
2654 * We will go through a full undirtying below,
2655 * before dirtying it again.
2656 */
2657 undirty = B_TRUE;
2658 } else {
2659 /* This dbuf is already dirty and cached. */
2660 dbuf_redirty(dr);
2661 mutex_exit(&db->db_mtx);
2662 return;
2663 }
2664 }
2665 }
2666 mutex_exit(&db->db_mtx);
2667
2668 DB_DNODE_ENTER(db);
2669 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2670 flags |= DB_RF_HAVESTRUCT;
2671 DB_DNODE_EXIT(db);
2672
2673 /*
2674 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2675 * want to make sure dbuf_read() will read the pending cloned block and
2676 * not the underlying block that is being replaced. dbuf_undirty() will
2677 * do dbuf_unoverride(), so we will end up with the cloned block's
2678 * contents, without an overridden BP.
2679 */
2680 (void) dbuf_read(db, NULL, flags);
2681 if (undirty) {
2682 mutex_enter(&db->db_mtx);
2683 VERIFY(!dbuf_undirty(db, tx));
2684 mutex_exit(&db->db_mtx);
2685 }
2686 (void) dbuf_dirty(db, tx);
2687 }
2688
2689 void
2690 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2691 {
2692 dmu_buf_will_dirty_impl(db_fake,
2693 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2694 }
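/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] The usual
 * open-context modify pattern: declare the dirty intent against an
 * assigned transaction, then write through db_data.
 */
#if 0
static void
example_modify(dmu_buf_t *db, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(db, tx);
	/* The buffer is dirty in tx's txg; it is now safe to modify. */
	memset(db->db_data, 0, db->db_size);
}
#endif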
2695
2696 boolean_t
2697 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2698 {
2699 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2700 dbuf_dirty_record_t *dr;
2701
2702 mutex_enter(&db->db_mtx);
2703 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2704 mutex_exit(&db->db_mtx);
2705 return (dr != NULL);
2706 }
2707
2708 void
2709 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
2710 {
2711 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2712
2713 /*
2714 * Block cloning: We are going to clone into this block, so undirty
2715 * modifications done to this block so far in this txg. This includes
2716 * writes and clones into this block.
2717 */
2718 mutex_enter(&db->db_mtx);
2719 DBUF_VERIFY(db);
2720 VERIFY(!dbuf_undirty(db, tx));
2721 ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2722 if (db->db_buf != NULL) {
2723 arc_buf_destroy(db->db_buf, db);
2724 db->db_buf = NULL;
2725 dbuf_clear_data(db);
2726 }
2727
2728 db->db_state = DB_NOFILL;
2729 DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone");
2730
2731 DBUF_VERIFY(db);
2732 mutex_exit(&db->db_mtx);
2733
2734 dbuf_noread(db);
2735 (void) dbuf_dirty(db, tx);
2736 }
2737
2738 void
2739 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2740 {
2741 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2742
2743 mutex_enter(&db->db_mtx);
2744 db->db_state = DB_NOFILL;
2745 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2746 mutex_exit(&db->db_mtx);
2747
2748 dbuf_noread(db);
2749 (void) dbuf_dirty(db, tx);
2750 }
2751
2752 void
2753 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
2754 {
2755 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2756
2757 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2758 ASSERT(tx->tx_txg != 0);
2759 ASSERT(db->db_level == 0);
2760 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2761
2762 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2763 dmu_tx_private_ok(tx));
2764
2765 mutex_enter(&db->db_mtx);
2766 if (db->db_state == DB_NOFILL) {
2767 /*
2768 * Block cloning: We will be completely overwriting a block
2769 * cloned in this transaction group, so let's undirty the
2770 * pending clone and mark the block as uncached. This will be
2771 * as if the clone was never done. But if the fill can fail
2772 * we should have a way to fall back to the cloned data.
2773 */
2774 if (canfail && dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) {
2775 mutex_exit(&db->db_mtx);
2776 dmu_buf_will_dirty(db_fake, tx);
2777 return;
2778 }
2779 VERIFY(!dbuf_undirty(db, tx));
2780 db->db_state = DB_UNCACHED;
2781 }
2782 mutex_exit(&db->db_mtx);
2783
2784 dbuf_noread(db);
2785 (void) dbuf_dirty(db, tx);
2786 }
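/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] The
 * will_fill/fill_done protocol for complete overwrites. With
 * canfail == B_TRUE, a failed copy can be rolled back by passing
 * failed == B_TRUE to dmu_buf_fill_done(). example_copy_in() is a
 * hypothetical data source.
 */
#if 0
static int
example_overwrite(dmu_buf_t *db, dmu_tx_t *tx)
{
	dmu_buf_will_fill(db, tx, B_TRUE);
	int err = example_copy_in(db->db_data, db->db_size);
	/* Returns B_TRUE if the fill failed and was rolled back. */
	if (dmu_buf_fill_done(db, tx, err != 0))
		return (err);
	return (0);
}
#endif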
2787
2788 /*
2789 * This function is effectively the same as dmu_buf_will_dirty(), but
2790 * indicates the caller expects raw encrypted data in the db, and provides
2791 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2792 * blkptr_t when this dbuf is written. This is only used for blocks of
2793 * dnodes, during raw receive.
2794 */
2795 void
2796 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2797 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2798 {
2799 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2800 dbuf_dirty_record_t *dr;
2801
2802 /*
2803 * dr_has_raw_params is only processed for blocks of dnodes
2804 * (see dbuf_sync_dnode_leaf_crypt()).
2805 */
2806 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2807 ASSERT3U(db->db_level, ==, 0);
2808 ASSERT(db->db_objset->os_raw_receive);
2809
2810 dmu_buf_will_dirty_impl(db_fake,
2811 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2812
2813 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2814
2815 ASSERT3P(dr, !=, NULL);
2816
2817 dr->dt.dl.dr_has_raw_params = B_TRUE;
2818 dr->dt.dl.dr_byteorder = byteorder;
2819 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2820 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2821 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2822 }
2823
2824 static void
2825 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2826 {
2827 struct dirty_leaf *dl;
2828 dbuf_dirty_record_t *dr;
2829
2830 dr = list_head(&db->db_dirty_records);
2831 ASSERT3P(dr, !=, NULL);
2832 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2833 dl = &dr->dt.dl;
2834 dl->dr_overridden_by = *bp;
2835 dl->dr_override_state = DR_OVERRIDDEN;
2836 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
2837 }
2838
2839 boolean_t
2840 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
2841 {
2842 (void) tx;
2843 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2844 mutex_enter(&db->db_mtx);
2845 DBUF_VERIFY(db);
2846
2847 if (db->db_state == DB_FILL) {
2848 if (db->db_level == 0 && db->db_freed_in_flight) {
2849 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2850 /* we were freed while filling */
2851 /* XXX dbuf_undirty? */
2852 memset(db->db.db_data, 0, db->db.db_size);
2853 db->db_freed_in_flight = FALSE;
2854 db->db_state = DB_CACHED;
2855 DTRACE_SET_STATE(db,
2856 "fill done handling freed in flight");
2857 failed = B_FALSE;
2858 } else if (failed) {
2859 VERIFY(!dbuf_undirty(db, tx));
2860 db->db_buf = NULL;
2861 dbuf_clear_data(db);
2862 DTRACE_SET_STATE(db, "fill failed");
2863 } else {
2864 db->db_state = DB_CACHED;
2865 DTRACE_SET_STATE(db, "fill done");
2866 }
2867 cv_broadcast(&db->db_changed);
2868 } else {
2869 db->db_state = DB_CACHED;
2870 failed = B_FALSE;
2871 }
2872 mutex_exit(&db->db_mtx);
2873 return (failed);
2874 }
2875
2876 void
2877 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2878 bp_embedded_type_t etype, enum zio_compress comp,
2879 int uncompressed_size, int compressed_size, int byteorder,
2880 dmu_tx_t *tx)
2881 {
2882 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2883 struct dirty_leaf *dl;
2884 dmu_object_type_t type;
2885 dbuf_dirty_record_t *dr;
2886
2887 if (etype == BP_EMBEDDED_TYPE_DATA) {
2888 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2889 SPA_FEATURE_EMBEDDED_DATA));
2890 }
2891
2892 DB_DNODE_ENTER(db);
2893 type = DB_DNODE(db)->dn_type;
2894 DB_DNODE_EXIT(db);
2895
2896 ASSERT0(db->db_level);
2897 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2898
2899 dmu_buf_will_not_fill(dbuf, tx);
2900
2901 dr = list_head(&db->db_dirty_records);
2902 ASSERT3P(dr, !=, NULL);
2903 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2904 dl = &dr->dt.dl;
2905 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2906 data, comp, uncompressed_size, compressed_size);
2907 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2908 BP_SET_TYPE(&dl->dr_overridden_by, type);
2909 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2910 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2911
2912 dl->dr_override_state = DR_OVERRIDDEN;
2913 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
2914 }
2915
2916 void
2917 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2918 {
2919 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2920 dmu_object_type_t type;
2921 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2922 SPA_FEATURE_REDACTED_DATASETS));
2923
2924 DB_DNODE_ENTER(db);
2925 type = DB_DNODE(db)->dn_type;
2926 DB_DNODE_EXIT(db);
2927
2928 ASSERT0(db->db_level);
2929 dmu_buf_will_not_fill(dbuf, tx);
2930
2931 blkptr_t bp = { { { {0} } } };
2932 BP_SET_TYPE(&bp, type);
2933 BP_SET_LEVEL(&bp, 0);
2934 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2935 BP_SET_REDACTED(&bp);
2936 BPE_SET_LSIZE(&bp, dbuf->db_size);
2937
2938 dbuf_override_impl(db, &bp, tx);
2939 }
2940
2941 /*
2942 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2943 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2944 */
2945 void
2946 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2947 {
2948 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2949 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2950 ASSERT(db->db_level == 0);
2951 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2952 ASSERT(buf != NULL);
2953 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2954 ASSERT(tx->tx_txg != 0);
2955
2956 arc_return_buf(buf, db);
2957 ASSERT(arc_released(buf));
2958
2959 mutex_enter(&db->db_mtx);
2960
2961 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2962 cv_wait(&db->db_changed, &db->db_mtx);
2963
2964 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
2965 db->db_state == DB_NOFILL);
2966
2967 if (db->db_state == DB_CACHED &&
2968 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2969 /*
2970 * In practice, we will never have a case where we have an
2971 * encrypted arc buffer while additional holds exist on the
2972 * dbuf. We don't handle this here so we simply assert that
2973 * fact instead.
2974 */
2975 ASSERT(!arc_is_encrypted(buf));
2976 mutex_exit(&db->db_mtx);
2977 (void) dbuf_dirty(db, tx);
2978 memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2979 arc_buf_destroy(buf, db);
2980 return;
2981 }
2982
2983 if (db->db_state == DB_CACHED) {
2984 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2985
2986 ASSERT(db->db_buf != NULL);
2987 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2988 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2989
2990 if (!arc_released(db->db_buf)) {
2991 ASSERT(dr->dt.dl.dr_override_state ==
2992 DR_OVERRIDDEN);
2993 arc_release(db->db_buf, db);
2994 }
2995 dr->dt.dl.dr_data = buf;
2996 arc_buf_destroy(db->db_buf, db);
2997 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2998 arc_release(db->db_buf, db);
2999 arc_buf_destroy(db->db_buf, db);
3000 }
3001 db->db_buf = NULL;
3002 } else if (db->db_state == DB_NOFILL) {
3003 /*
3004 * We will be completely replacing the cloned block. In case
3005 * it was cloned in this transaction group, let's undirty the
3006 * pending clone and mark the block as uncached. This will be
3007 * as if the clone was never done.
3008 */
3009 VERIFY(!dbuf_undirty(db, tx));
3010 db->db_state = DB_UNCACHED;
3011 }
3012 ASSERT(db->db_buf == NULL);
3013 dbuf_set_data(db, buf);
3014 db->db_state = DB_FILL;
3015 DTRACE_SET_STATE(db, "filling assigned arcbuf");
3016 mutex_exit(&db->db_mtx);
3017 (void) dbuf_dirty(db, tx);
3018 dmu_buf_fill_done(&db->db, tx, B_FALSE);
3019 }
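/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] Loaning a
 * buffer from the ARC, filling it, and handing it to the dbuf to avoid
 * a copy; assumes arc_loan_buf() as the source of the loaned buffer.
 */
#if 0
static void
example_assign(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	arc_buf_t *buf = arc_loan_buf(db->db_objset->os_spa,
	    dbuf_is_metadata(db), db->db.db_size);
	/* ... fill buf->b_data with db->db.db_size bytes ... */
	dbuf_assign_arcbuf(db, buf, tx);	/* consumes buf */
}
#endif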
3020
3021 void
3022 dbuf_destroy(dmu_buf_impl_t *db)
3023 {
3024 dnode_t *dn;
3025 dmu_buf_impl_t *parent = db->db_parent;
3026 dmu_buf_impl_t *dndb;
3027
3028 ASSERT(MUTEX_HELD(&db->db_mtx));
3029 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3030
3031 if (db->db_buf != NULL) {
3032 arc_buf_destroy(db->db_buf, db);
3033 db->db_buf = NULL;
3034 }
3035
3036 if (db->db_blkid == DMU_BONUS_BLKID) {
3037 int slots = DB_DNODE(db)->dn_num_slots;
3038 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
3039 if (db->db.db_data != NULL) {
3040 kmem_free(db->db.db_data, bonuslen);
3041 arc_space_return(bonuslen, ARC_SPACE_BONUS);
3042 db->db_state = DB_UNCACHED;
3043 DTRACE_SET_STATE(db, "buffer cleared");
3044 }
3045 }
3046
3047 dbuf_clear_data(db);
3048
3049 if (multilist_link_active(&db->db_cache_link)) {
3050 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3051 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3052
3053 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3054
3055 ASSERT0(dmu_buf_user_size(&db->db));
3056 (void) zfs_refcount_remove_many(
3057 &dbuf_caches[db->db_caching_status].size,
3058 db->db.db_size, db);
3059
3060 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3061 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3062 } else {
3063 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3064 DBUF_STAT_BUMPDOWN(cache_count);
3065 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3066 db->db.db_size);
3067 }
3068 db->db_caching_status = DB_NO_CACHE;
3069 }
3070
3071 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3072 ASSERT(db->db_data_pending == NULL);
3073 ASSERT(list_is_empty(&db->db_dirty_records));
3074
3075 db->db_state = DB_EVICTING;
3076 DTRACE_SET_STATE(db, "buffer eviction started");
3077 db->db_blkptr = NULL;
3078
3079 /*
3080 * Now that db_state is DB_EVICTING, nobody else can find this via
3081 * the hash table. We can now drop db_mtx, which allows us to
3082 * acquire the dn_dbufs_mtx.
3083 */
3084 mutex_exit(&db->db_mtx);
3085
3086 DB_DNODE_ENTER(db);
3087 dn = DB_DNODE(db);
3088 dndb = dn->dn_dbuf;
3089 if (db->db_blkid != DMU_BONUS_BLKID) {
3090 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3091 if (needlock)
3092 mutex_enter_nested(&dn->dn_dbufs_mtx,
3093 NESTED_SINGLE);
3094 avl_remove(&dn->dn_dbufs, db);
3095 membar_producer();
3096 DB_DNODE_EXIT(db);
3097 if (needlock)
3098 mutex_exit(&dn->dn_dbufs_mtx);
3099 /*
3100 * Decrementing the dbuf count means that the hold corresponding
3101 * to the removed dbuf is no longer discounted in dnode_move(),
3102 * so the dnode cannot be moved until after we release the hold.
3103 * The membar_producer() ensures visibility of the decremented
3104 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3105 * release any lock.
3106 */
3107 mutex_enter(&dn->dn_mtx);
3108 dnode_rele_and_unlock(dn, db, B_TRUE);
3109 db->db_dnode_handle = NULL;
3110
3111 dbuf_hash_remove(db);
3112 } else {
3113 DB_DNODE_EXIT(db);
3114 }
3115
3116 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3117
3118 db->db_parent = NULL;
3119
3120 ASSERT(db->db_buf == NULL);
3121 ASSERT(db->db.db_data == NULL);
3122 ASSERT(db->db_hash_next == NULL);
3123 ASSERT(db->db_blkptr == NULL);
3124 ASSERT(db->db_data_pending == NULL);
3125 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3126 ASSERT(!multilist_link_active(&db->db_cache_link));
3127
3128 /*
3129 * If this dbuf is referenced from an indirect dbuf,
3130 * decrement the ref count on the indirect dbuf.
3131 */
3132 if (parent && parent != dndb) {
3133 mutex_enter(&parent->db_mtx);
3134 dbuf_rele_and_unlock(parent, db, B_TRUE);
3135 }
3136
3137 kmem_cache_free(dbuf_kmem_cache, db);
3138 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3139 }
3140
3141 /*
3142 * Note: While bpp will always be updated if the function returns success,
3143 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3144 * this happens when the dnode is the meta-dnode, or {user|group|project}used
3145 * object.
3146 */
3147 __attribute__((always_inline))
3148 static inline int
3149 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3150 dmu_buf_impl_t **parentp, blkptr_t **bpp)
3151 {
3152 *parentp = NULL;
3153 *bpp = NULL;
3154
3155 ASSERT(blkid != DMU_BONUS_BLKID);
3156
3157 if (blkid == DMU_SPILL_BLKID) {
3158 mutex_enter(&dn->dn_mtx);
3159 if (dn->dn_have_spill &&
3160 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3161 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3162 else
3163 *bpp = NULL;
3164 dbuf_add_ref(dn->dn_dbuf, NULL);
3165 *parentp = dn->dn_dbuf;
3166 mutex_exit(&dn->dn_mtx);
3167 return (0);
3168 }
3169
3170 int nlevels =
3171 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3172 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3173
3174 ASSERT3U(level * epbs, <, 64);
3175 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3176 /*
3177 * This assertion shouldn't trip as long as the max indirect block size
3178 * is less than 1M. The reason for this is that up to that point,
3179 * the number of levels required to address an entire object with blocks
3180 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
3181 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3182 * (i.e. we can address the entire object), objects will all use at most
3183 * N-1 levels and the assertion won't overflow. However, once epbs is
3184 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
3185 * enough to address an entire object, so objects will have 5 levels,
3186 * but then this assertion will overflow.
3187 *
3188 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3189 * need to redo this logic to handle overflows.
3190 */
3191 ASSERT(level >= nlevels ||
3192 ((nlevels - level - 1) * epbs) +
3193 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3194 if (level >= nlevels ||
3195 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3196 ((nlevels - level - 1) * epbs)) ||
3197 (fail_sparse &&
3198 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3199 /* the buffer has no parent yet */
3200 return (SET_ERROR(ENOENT));
3201 } else if (level < nlevels-1) {
3202 /* this block is referenced from an indirect block */
3203 int err;
3204
3205 err = dbuf_hold_impl(dn, level + 1,
3206 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3207
3208 if (err)
3209 return (err);
3210 err = dbuf_read(*parentp, NULL,
3211 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3212 if (err) {
3213 dbuf_rele(*parentp, NULL);
3214 *parentp = NULL;
3215 return (err);
3216 }
3217 rw_enter(&(*parentp)->db_rwlock, RW_READER);
3218 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3219 (blkid & ((1ULL << epbs) - 1));
3220 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3221 ASSERT(BP_IS_HOLE(*bpp));
3222 rw_exit(&(*parentp)->db_rwlock);
3223 return (0);
3224 } else {
3225 /* the block is referenced from the dnode */
3226 ASSERT3U(level, ==, nlevels-1);
3227 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3228 blkid < dn->dn_phys->dn_nblkptr);
3229 if (dn->dn_dbuf) {
3230 dbuf_add_ref(dn->dn_dbuf, NULL);
3231 *parentp = dn->dn_dbuf;
3232 }
3233 *bpp = &dn->dn_phys->dn_blkptr[blkid];
3234 return (0);
3235 }
3236 }
3237
3238 static dmu_buf_impl_t *
3239 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3240 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3241 {
3242 objset_t *os = dn->dn_objset;
3243 dmu_buf_impl_t *db, *odb;
3244
3245 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3246 ASSERT(dn->dn_type != DMU_OT_NONE);
3247
3248 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3249
3250 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3251 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3252
3253 db->db_objset = os;
3254 db->db.db_object = dn->dn_object;
3255 db->db_level = level;
3256 db->db_blkid = blkid;
3257 db->db_dirtycnt = 0;
3258 db->db_dnode_handle = dn->dn_handle;
3259 db->db_parent = parent;
3260 db->db_blkptr = blkptr;
3261 db->db_hash = hash;
3262
3263 db->db_user = NULL;
3264 db->db_user_immediate_evict = FALSE;
3265 db->db_freed_in_flight = FALSE;
3266 db->db_pending_evict = FALSE;
3267
3268 if (blkid == DMU_BONUS_BLKID) {
3269 ASSERT3P(parent, ==, dn->dn_dbuf);
3270 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3271 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3272 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3273 db->db.db_offset = DMU_BONUS_BLKID;
3274 db->db_state = DB_UNCACHED;
3275 DTRACE_SET_STATE(db, "bonus buffer created");
3276 db->db_caching_status = DB_NO_CACHE;
3277 /* the bonus dbuf is not placed in the hash table */
3278 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3279 return (db);
3280 } else if (blkid == DMU_SPILL_BLKID) {
3281 db->db.db_size = (blkptr != NULL) ?
3282 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3283 db->db.db_offset = 0;
3284 } else {
3285 int blocksize =
3286 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3287 db->db.db_size = blocksize;
3288 db->db.db_offset = db->db_blkid * blocksize;
3289 }
3290
3291 /*
3292 * Hold the dn_dbufs_mtx while we get the new dbuf
3293 * in the hash table *and* added to the dbufs list.
3294 * This prevents a possible deadlock with someone
3295 * trying to look up this dbuf before it's added to the
3296 * dn_dbufs list.
3297 */
3298 mutex_enter(&dn->dn_dbufs_mtx);
3299 db->db_state = DB_EVICTING; /* not worth logging this state change */
3300 if ((odb = dbuf_hash_insert(db)) != NULL) {
3301 /* someone else inserted it first */
3302 mutex_exit(&dn->dn_dbufs_mtx);
3303 kmem_cache_free(dbuf_kmem_cache, db);
3304 DBUF_STAT_BUMP(hash_insert_race);
3305 return (odb);
3306 }
3307 avl_add(&dn->dn_dbufs, db);
3308
3309 db->db_state = DB_UNCACHED;
3310 DTRACE_SET_STATE(db, "regular buffer created");
3311 db->db_caching_status = DB_NO_CACHE;
3312 mutex_exit(&dn->dn_dbufs_mtx);
3313 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3314
3315 if (parent && parent != dn->dn_dbuf)
3316 dbuf_add_ref(parent, db);
3317
3318 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3319 zfs_refcount_count(&dn->dn_holds) > 0);
3320 (void) zfs_refcount_add(&dn->dn_holds, db);
3321
3322 dprintf_dbuf(db, "db=%p\n", db);
3323
3324 return (db);
3325 }
3326
3327 /*
3328 * This function returns a block pointer and information about the object,
3329 * given a dnode and a block. This is a publicly accessible version of
3330 * dbuf_findbp that only returns some information, rather than the
3331 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3332 * should be locked as (at least) a reader.
3333 */
3334 int
3335 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3336 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3337 {
3338 dmu_buf_impl_t *dbp = NULL;
3339 blkptr_t *bp2;
3340 int err = 0;
3341 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3342
3343 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3344 if (err == 0) {
3345 ASSERT3P(bp2, !=, NULL);
3346 *bp = *bp2;
3347 if (dbp != NULL)
3348 dbuf_rele(dbp, NULL);
3349 if (datablkszsec != NULL)
3350 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3351 if (indblkshift != NULL)
3352 *indblkshift = dn->dn_phys->dn_indblkshift;
3353 }
3354
3355 return (err);
3356 }
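/*
 * [Editor's illustrative sketch; not part of upstream dbuf.c.] Looking
 * up the block pointer of a level-0 block while honoring the locking
 * contract documented above (dnode held, dn_struct_rwlock as reader).
 */
#if 0
static int
example_findbp(dnode_t *dn, uint64_t blkid, blkptr_t *bp)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	int err = dbuf_dnode_findbp(dn, 0, blkid, bp, NULL, NULL);
	rw_exit(&dn->dn_struct_rwlock);
	return (err);
}
#endif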
3357
3358 typedef struct dbuf_prefetch_arg {
3359 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3360 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3361 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3362 int dpa_curlevel; /* The current level that we're reading */
3363 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3364 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3365 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3366 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3367 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3368 void *dpa_arg; /* prefetch completion arg */
3369 } dbuf_prefetch_arg_t;
3370
3371 static void
3372 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3373 {
3374 if (dpa->dpa_cb != NULL) {
3375 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3376 dpa->dpa_zb.zb_blkid, io_done);
3377 }
3378 kmem_free(dpa, sizeof (*dpa));
3379 }
3380
3381 static void
3382 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3383 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3384 {
3385 (void) zio, (void) zb, (void) iobp;
3386 dbuf_prefetch_arg_t *dpa = private;
3387
3388 if (abuf != NULL)
3389 arc_buf_destroy(abuf, private);
3390
3391 dbuf_prefetch_fini(dpa, B_TRUE);
3392 }
3393
3394 /*
3395 * Actually issue the prefetch read for the block given.
3396 */
3397 static void
3398 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3399 {
3400 ASSERT(!BP_IS_REDACTED(bp) ||
3401 dsl_dataset_feature_is_active(
3402 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3403 SPA_FEATURE_REDACTED_DATASETS));
3404
3405 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3406 return (dbuf_prefetch_fini(dpa, B_FALSE));
3407
3408 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3409 arc_flags_t aflags =
3410 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3411 ARC_FLAG_NO_BUF;
3412
3413 /* dnodes are always read as raw and then converted later */
3414 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3415 dpa->dpa_curlevel == 0)
3416 zio_flags |= ZIO_FLAG_RAW;
3417
3418 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3419 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3420 ASSERT(dpa->dpa_zio != NULL);
3421 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3422 dbuf_issue_final_prefetch_done, dpa,
3423 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3424 }
3425
3426 /*
3427 * Called when an indirect block above our prefetch target is read in. This
3428 * will either read in the next indirect block down the tree or issue the actual
3429 * prefetch if the next block down is our target.
3430 */
3431 static void
3432 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3433 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3434 {
3435 (void) zb, (void) iobp;
3436 dbuf_prefetch_arg_t *dpa = private;
3437
3438 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3439 ASSERT3S(dpa->dpa_curlevel, >, 0);
3440
3441 if (abuf == NULL) {
3442 ASSERT(zio == NULL || zio->io_error != 0);
3443 dbuf_prefetch_fini(dpa, B_TRUE);
3444 return;
3445 }
3446 ASSERT(zio == NULL || zio->io_error == 0);
3447
3448 /*
3449 * The dpa_dnode is only valid if we are called with a NULL
3450 * zio. This indicates that the arc_read() returned without
3451 * first calling zio_read() to issue a physical read. Once
3452 * a physical read is made the dpa_dnode must be invalidated
3453 * as the locks guarding it may have been dropped. If the
3454 * dpa_dnode is still valid, then we want to add it to the dbuf
3455 * cache. To do so, we must hold the dbuf associated with the block
3456 * we just prefetched, read its contents so that we associate it
3457 * with an arc_buf_t, and then release it.
3458 */
3459 if (zio != NULL) {
3460 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3461 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3462 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3463 } else {
3464 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3465 }
3466 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3467
3468 dpa->dpa_dnode = NULL;
3469 } else if (dpa->dpa_dnode != NULL) {
3470 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3471 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3472 dpa->dpa_zb.zb_level));
3473 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3474 dpa->dpa_curlevel, curblkid, FTAG);
3475 if (db == NULL) {
3476 arc_buf_destroy(abuf, private);
3477 dbuf_prefetch_fini(dpa, B_TRUE);
3478 return;
3479 }
3480 (void) dbuf_read(db, NULL,
3481 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3482 dbuf_rele(db, FTAG);
3483 }
3484
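	/*
	 * Step down one level. The BP covering the target at the new
	 * curlevel lives in this indirect block at slot
	 * P2PHASE(nextblkid, 1 << epbs). For example, with epbs = 7
	 * (128 block pointers per indirect block) and a level-0 target
	 * blkid of 1000 reached from level 1, nextblkid is 1000 and
	 * the slot is 1000 % 128 = 104.
	 */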
3485 dpa->dpa_curlevel--;
3486 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3487 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3488 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3489 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3490
3491 ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3492 dsl_dataset_feature_is_active(
3493 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3494 SPA_FEATURE_REDACTED_DATASETS)));
3495 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3496 arc_buf_destroy(abuf, private);
3497 dbuf_prefetch_fini(dpa, B_TRUE);
3498 return;
3499 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3500 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3501 dbuf_issue_final_prefetch(dpa, bp);
3502 } else {
3503 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3504 zbookmark_phys_t zb;
3505
3506 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3507 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3508 iter_aflags |= ARC_FLAG_L2CACHE;
3509
3510 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3511
3512 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3513 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3514
3515 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3516 bp, dbuf_prefetch_indirect_done, dpa,
3517 ZIO_PRIORITY_SYNC_READ,
3518 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3519 &iter_aflags, &zb);
3520 }
3521
3522 arc_buf_destroy(abuf, private);
3523 }
3524
3525 /*
3526 * Issue prefetch reads for the given block on the given level. If the indirect
3527 * blocks above that block are not in memory, we will read them in
3528 * asynchronously. As a result, this call never blocks waiting for a read to
3529 * complete. Note that the prefetch might fail if the dataset is encrypted and
3530 * the encryption key is unmapped before the IO completes.
3531 */
3532 int
3533 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3534 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3535 void *arg)
3536 {
3537 blkptr_t bp;
3538 int epbs, nlevels, curlevel;
3539 uint64_t curblkid;
3540
3541 ASSERT(blkid != DMU_BONUS_BLKID);
3542 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3543
3544 if (blkid > dn->dn_maxblkid)
3545 goto no_issue;
3546
3547 if (level == 0 && dnode_block_freed(dn, blkid))
3548 goto no_issue;
3549
3550 /*
3551 * This dnode hasn't been written to disk yet, so there's nothing to
3552 * prefetch.
3553 */
3554 nlevels = dn->dn_phys->dn_nlevels;
3555 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3556 goto no_issue;
3557
3558 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3559 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3560 goto no_issue;
3561
3562 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3563 level, blkid, NULL);
3564 if (db != NULL) {
3565 mutex_exit(&db->db_mtx);
3566 /*
3567 * This dbuf already exists. It is either CACHED, or
3568 * (we assume) about to be read or filled.
3569 */
3570 goto no_issue;
3571 }
3572
3573 /*
3574 * Find the closest ancestor (indirect block) of the target block
3575 * that is present in the cache. In this indirect block, we will
3576 * find the bp that is at curlevel, curblkid.
3577 */
3578 curlevel = level;
3579 curblkid = blkid;
3580 while (curlevel < nlevels - 1) {
3581 int parent_level = curlevel + 1;
3582 uint64_t parent_blkid = curblkid >> epbs;
3583 dmu_buf_impl_t *db;
3584
3585 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3586 FALSE, TRUE, FTAG, &db) == 0) {
3587 blkptr_t *bpp = db->db_buf->b_data;
3588 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3589 dbuf_rele(db, FTAG);
3590 break;
3591 }
3592
3593 curlevel = parent_level;
3594 curblkid = parent_blkid;
3595 }
3596
3597 if (curlevel == nlevels - 1) {
3598 /* No cached indirect blocks found. */
3599 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3600 bp = dn->dn_phys->dn_blkptr[curblkid];
3601 }
3602 ASSERT(!BP_IS_REDACTED(&bp) ||
3603 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3604 SPA_FEATURE_REDACTED_DATASETS));
3605 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3606 goto no_issue;
3607
3608 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3609
3610 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3611 ZIO_FLAG_CANFAIL);
3612
3613 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3614 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3615 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3616 dn->dn_object, level, blkid);
3617 dpa->dpa_curlevel = curlevel;
3618 dpa->dpa_prio = prio;
3619 dpa->dpa_aflags = aflags;
3620 dpa->dpa_spa = dn->dn_objset->os_spa;
3621 dpa->dpa_dnode = dn;
3622 dpa->dpa_epbs = epbs;
3623 dpa->dpa_zio = pio;
3624 dpa->dpa_cb = cb;
3625 dpa->dpa_arg = arg;
3626
3627 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3628 dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3629 else if (dnode_level_is_l2cacheable(&bp, dn, level))
3630 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3631
3632 /*
3633 * If we have the indirect just above us, no need to do the asynchronous
3634 * prefetch chain; we'll just run the last step ourselves. If we're at
3635 * a higher level, though, we want to issue the prefetches for all the
3636 * indirect blocks asynchronously, so we can go on with whatever we were
3637 * doing.
3638 */
3639 if (curlevel == level) {
3640 ASSERT3U(curblkid, ==, blkid);
3641 dbuf_issue_final_prefetch(dpa, &bp);
3642 } else {
3643 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3644 zbookmark_phys_t zb;
3645
3646 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3647 if (dnode_level_is_l2cacheable(&bp, dn, level))
3648 iter_aflags |= ARC_FLAG_L2CACHE;
3649
3650 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3651 dn->dn_object, curlevel, curblkid);
3652 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3653 &bp, dbuf_prefetch_indirect_done, dpa,
3654 ZIO_PRIORITY_SYNC_READ,
3655 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3656 &iter_aflags, &zb);
3657 }
3658 /*
3659 * We use pio here instead of dpa_zio since it's possible that
3660 * dpa may have already been freed.
3661 */
3662 zio_nowait(pio);
3663 return (1);
3664 no_issue:
3665 if (cb != NULL)
3666 cb(arg, level, blkid, B_FALSE);
3667 return (0);
3668 }
3669
3670 int
3671 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3672 arc_flags_t aflags)
3673 {
3675 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3676 }
3677
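/*
 * Illustrative sketch, not part of the original source: how a caller
 * might prefetch a run of level-0 blocks. The helper name and the
 * range are hypothetical; note that dbuf_prefetch() requires the
 * caller to hold dn_struct_rwlock, as asserted in dbuf_prefetch_impl().
 */
static void __maybe_unused
dbuf_prefetch_example_range(dnode_t *dn, uint64_t start, uint64_t nblks)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blkid = start; blkid < start + nblks; blkid++) {
		/* Fire and forget; a return of 0 means nothing was issued. */
		(void) dbuf_prefetch(dn, 0, blkid,
		    ZIO_PRIORITY_ASYNC_READ, 0);
	}
	rw_exit(&dn->dn_struct_rwlock);
}
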
3678 /*
3679 * Helper function for dbuf_hold_impl() to copy a buffer. Handles the
3680 * cases of encrypted, compressed, and uncompressed buffers by
3681 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3682 * arc_alloc_compressed_buf(), or arc_alloc_buf().
3683 *
3684 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3685 */
3686 noinline static void
3687 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3688 {
3689 dbuf_dirty_record_t *dr = db->db_data_pending;
3690 arc_buf_t *data = dr->dt.dl.dr_data;
3691 enum zio_compress compress_type = arc_get_compression(data);
3692 uint8_t complevel = arc_get_complevel(data);
3693
3694 if (arc_is_encrypted(data)) {
3695 boolean_t byteorder;
3696 uint8_t salt[ZIO_DATA_SALT_LEN];
3697 uint8_t iv[ZIO_DATA_IV_LEN];
3698 uint8_t mac[ZIO_DATA_MAC_LEN];
3699
3700 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3701 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3702 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3703 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3704 compress_type, complevel));
3705 } else if (compress_type != ZIO_COMPRESS_OFF) {
3706 dbuf_set_data(db, arc_alloc_compressed_buf(
3707 dn->dn_objset->os_spa, db, arc_buf_size(data),
3708 arc_buf_lsize(data), compress_type, complevel));
3709 } else {
3710 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3711 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3712 }
3713
3714 rw_enter(&db->db_rwlock, RW_WRITER);
3715 memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3716 rw_exit(&db->db_rwlock);
3717 }
3718
3719 /*
3720 * Returns with db_holds incremented, and db_mtx not held.
3721 * Note: dn_struct_rwlock must be held.
3722 */
3723 int
3724 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3725 boolean_t fail_sparse, boolean_t fail_uncached,
3726 const void *tag, dmu_buf_impl_t **dbp)
3727 {
3728 dmu_buf_impl_t *db, *parent = NULL;
3729 uint64_t hv;
3730
3731 /* If the pool has been created, verify the tx_sync_lock is not held */
3732 spa_t *spa = dn->dn_objset->os_spa;
3733 dsl_pool_t *dp = spa->spa_dsl_pool;
3734 if (dp != NULL) {
3735 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3736 }
3737
3738 ASSERT(blkid != DMU_BONUS_BLKID);
3739 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3740 ASSERT3U(dn->dn_nlevels, >, level);
3741
3742 *dbp = NULL;
3743
3744 /* dbuf_find() returns with db_mtx held */
3745 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3746
3747 if (db == NULL) {
3748 blkptr_t *bp = NULL;
3749 int err;
3750
3751 if (fail_uncached)
3752 return (SET_ERROR(ENOENT));
3753
3754 ASSERT3P(parent, ==, NULL);
3755 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3756 if (fail_sparse) {
3757 if (err == 0 && bp && BP_IS_HOLE(bp))
3758 err = SET_ERROR(ENOENT);
3759 if (err) {
3760 if (parent)
3761 dbuf_rele(parent, NULL);
3762 return (err);
3763 }
3764 }
3765 if (err && err != ENOENT)
3766 return (err);
3767 db = dbuf_create(dn, level, blkid, parent, bp, hv);
3768 }
3769
3770 if (fail_uncached && db->db_state != DB_CACHED) {
3771 mutex_exit(&db->db_mtx);
3772 return (SET_ERROR(ENOENT));
3773 }
3774
3775 if (db->db_buf != NULL) {
3776 arc_buf_access(db->db_buf);
3777 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3778 }
3779
3780 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3781
3782 /*
3783 * If this buffer is currently syncing out, and we are
3784 * still referencing it from db_data, we need to make a copy
3785 * of it in case we decide we want to dirty it again in this txg.
3786 */
3787 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3788 dn->dn_object != DMU_META_DNODE_OBJECT &&
3789 db->db_state == DB_CACHED && db->db_data_pending) {
3790 dbuf_dirty_record_t *dr = db->db_data_pending;
3791 if (dr->dt.dl.dr_data == db->db_buf) {
3792 ASSERT3P(db->db_buf, !=, NULL);
3793 dbuf_hold_copy(dn, db);
3794 }
3795 }
3796
3797 if (multilist_link_active(&db->db_cache_link)) {
3798 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3799 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3800 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3801
3802 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3803
3804 uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
3805 (void) zfs_refcount_remove_many(
3806 &dbuf_caches[db->db_caching_status].size, size, db);
3807
3808 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3809 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3810 } else {
3811 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3812 DBUF_STAT_BUMPDOWN(cache_count);
3813 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
3814 }
3815 db->db_caching_status = DB_NO_CACHE;
3816 }
3817 (void) zfs_refcount_add(&db->db_holds, tag);
3818 DBUF_VERIFY(db);
3819 mutex_exit(&db->db_mtx);
3820
3821 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3822 if (parent)
3823 dbuf_rele(parent, NULL);
3824
3825 ASSERT3P(DB_DNODE(db), ==, dn);
3826 ASSERT3U(db->db_blkid, ==, blkid);
3827 ASSERT3U(db->db_level, ==, level);
3828 *dbp = db;
3829
3830 return (0);
3831 }
3832
3833 dmu_buf_impl_t *
3834 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3835 {
3836 return (dbuf_hold_level(dn, 0, blkid, tag));
3837 }
3838
3839 dmu_buf_impl_t *
3840 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3841 {
3842 dmu_buf_impl_t *db;
3843 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3844 return (err ? NULL : db);
3845 }
3846
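/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the canonical hold/read/release pattern. A hold taken with
 * dbuf_hold() must be dropped with dbuf_rele() under the same tag, and
 * dn_struct_rwlock must be held across the hold itself.
 */
static int __maybe_unused
dbuf_example_read_block(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));

	int err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	dbuf_rele(db, FTAG);
	return (err);
}
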
3847 void
3848 dbuf_create_bonus(dnode_t *dn)
3849 {
3850 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3851
3852 ASSERT(dn->dn_bonus == NULL);
3853 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3854 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3855 }
3856
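/*
 * Set the block size of this dnode's spill block. A size of zero
 * selects SPA_MINBLOCKSIZE; otherwise the size is rounded up to the
 * next multiple of SPA_MINBLOCKSIZE (e.g., 1000 bytes becomes 1024
 * with the 512-byte minimum) and capped at spa_maxblocksize().
 */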
3857 int
3858 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3859 {
3860 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3861
3862 if (db->db_blkid != DMU_SPILL_BLKID)
3863 return (SET_ERROR(ENOTSUP));
3864 if (blksz == 0)
3865 blksz = SPA_MINBLOCKSIZE;
3866 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3867 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3868
3869 dbuf_new_size(db, blksz, tx);
3870
3871 return (0);
3872 }
3873
3874 void
3875 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3876 {
3877 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3878 }
3879
3880 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3881 void
3882 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3883 {
3884 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3885 VERIFY3S(holds, >, 1);
3886 }
3887
3888 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3889 boolean_t
3890 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3891 const void *tag)
3892 {
3893 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3894 dmu_buf_impl_t *found_db;
3895 boolean_t result = B_FALSE;
3896
3897 if (blkid == DMU_BONUS_BLKID)
3898 found_db = dbuf_find_bonus(os, obj);
3899 else
3900 found_db = dbuf_find(os, obj, 0, blkid, NULL);
3901
3902 if (found_db != NULL) {
3903 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3904 (void) zfs_refcount_add(&db->db_holds, tag);
3905 result = B_TRUE;
3906 }
3907 mutex_exit(&found_db->db_mtx);
3908 }
3909 return (result);
3910 }
3911
3912 /*
3913 * If you call dbuf_rele() you had better not be referencing the dnode handle
3914 * unless you have some other direct or indirect hold on the dnode. (An indirect
3915 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3916 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3917 * dnode's parent dbuf evicting its dnode handles.
3918 */
3919 void
3920 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3921 {
3922 mutex_enter(&db->db_mtx);
3923 dbuf_rele_and_unlock(db, tag, B_FALSE);
3924 }
3925
3926 void
3927 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3928 {
3929 dbuf_rele((dmu_buf_impl_t *)db, tag);
3930 }
3931
3932 /*
3933 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3934 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3935 * argument should be set if we are already in the dbuf-evicting code
3936 * path, in which case we don't want to recursively evict. This allows us to
3937 * avoid deeply nested stacks that would have a call flow similar to this:
3938 *
3939 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3940 * ^ |
3941 * | |
3942 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3943 *
3944 */
3945 void
3946 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3947 {
3948 int64_t holds;
3949 uint64_t size;
3950
3951 ASSERT(MUTEX_HELD(&db->db_mtx));
3952 DBUF_VERIFY(db);
3953
3954 /*
3955 * Remove the reference to the dbuf before removing its hold on the
3956 * dnode so we can guarantee in dnode_move() that a referenced bonus
3957 * buffer has a corresponding dnode hold.
3958 */
3959 holds = zfs_refcount_remove(&db->db_holds, tag);
3960 ASSERT(holds >= 0);
3961
3962 /*
3963 * We can't freeze indirects if there is a possibility that they
3964 * may be modified in the current syncing context.
3965 */
3966 if (db->db_buf != NULL &&
3967 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3968 arc_buf_freeze(db->db_buf);
3969 }
3970
3971 if (holds == db->db_dirtycnt &&
3972 db->db_level == 0 && db->db_user_immediate_evict)
3973 dbuf_evict_user(db);
3974
3975 if (holds == 0) {
3976 if (db->db_blkid == DMU_BONUS_BLKID) {
3977 dnode_t *dn;
3978 boolean_t evict_dbuf = db->db_pending_evict;
3979
3980 /*
3981 * If the dnode moves here, we cannot cross this
3982 * barrier until the move completes.
3983 */
3984 DB_DNODE_ENTER(db);
3985
3986 dn = DB_DNODE(db);
3987 atomic_dec_32(&dn->dn_dbufs_count);
3988
3989 /*
3990 * Decrementing the dbuf count means that the bonus
3991 * buffer's dnode hold is no longer discounted in
3992 * dnode_move(). The dnode cannot move until after
3993 * the dnode_rele() below.
3994 */
3995 DB_DNODE_EXIT(db);
3996
3997 /*
3998 * Do not reference db after its lock is dropped.
3999 * Another thread may evict it.
4000 */
4001 mutex_exit(&db->db_mtx);
4002
4003 if (evict_dbuf)
4004 dnode_evict_bonus(dn);
4005
4006 dnode_rele(dn, db);
4007 } else if (db->db_buf == NULL) {
4008 /*
4009 * This is a special case: we never associated this
4010 * dbuf with any data allocated from the ARC.
4011 */
4012 ASSERT(db->db_state == DB_UNCACHED ||
4013 db->db_state == DB_NOFILL);
4014 dbuf_destroy(db);
4015 } else if (arc_released(db->db_buf)) {
4016 /*
4017 * This dbuf has anonymous data associated with it.
4018 */
4019 dbuf_destroy(db);
4020 } else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
4021 db->db_pending_evict) {
4022 dbuf_destroy(db);
4023 } else if (!multilist_link_active(&db->db_cache_link)) {
4024 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4025
4026 dbuf_cached_state_t dcs =
4027 dbuf_include_in_metadata_cache(db) ?
4028 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
4029 db->db_caching_status = dcs;
4030
4031 multilist_insert(&dbuf_caches[dcs].cache, db);
4032 uint64_t db_size = db->db.db_size +
4033 dmu_buf_user_size(&db->db);
4034 size = zfs_refcount_add_many(
4035 &dbuf_caches[dcs].size, db_size, db);
4036 uint8_t db_level = db->db_level;
4037 mutex_exit(&db->db_mtx);
4038
4039 if (dcs == DB_DBUF_METADATA_CACHE) {
4040 DBUF_STAT_BUMP(metadata_cache_count);
4041 DBUF_STAT_MAX(metadata_cache_size_bytes_max,
4042 size);
4043 } else {
4044 DBUF_STAT_BUMP(cache_count);
4045 DBUF_STAT_MAX(cache_size_bytes_max, size);
4046 DBUF_STAT_BUMP(cache_levels[db_level]);
4047 DBUF_STAT_INCR(cache_levels_bytes[db_level],
4048 db_size);
4049 }
4050
4051 if (dcs == DB_DBUF_CACHE && !evicting)
4052 dbuf_evict_notify(size);
4053 }
4054 } else {
4055 mutex_exit(&db->db_mtx);
4056 }
4058 }
4059
4060 #pragma weak dmu_buf_refcount = dbuf_refcount
4061 uint64_t
4062 dbuf_refcount(dmu_buf_impl_t *db)
4063 {
4064 return (zfs_refcount_count(&db->db_holds));
4065 }
4066
4067 uint64_t
4068 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4069 {
4070 uint64_t holds;
4071 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4072
4073 mutex_enter(&db->db_mtx);
4074 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4075 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4076 mutex_exit(&db->db_mtx);
4077
4078 return (holds);
4079 }
4080
4081 void *
4082 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4083 dmu_buf_user_t *new_user)
4084 {
4085 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4086
4087 mutex_enter(&db->db_mtx);
4088 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4089 if (db->db_user == old_user)
4090 db->db_user = new_user;
4091 else
4092 old_user = db->db_user;
4093 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4094 mutex_exit(&db->db_mtx);
4095
4096 return (old_user);
4097 }
4098
4099 void *
4100 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4101 {
4102 return (dmu_buf_replace_user(db_fake, NULL, user));
4103 }
4104
4105 void *
4106 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4107 {
4108 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4109
4110 db->db_user_immediate_evict = TRUE;
4111 return (dmu_buf_set_user(db_fake, user));
4112 }
4113
4114 void *
4115 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4116 {
4117 return (dmu_buf_replace_user(db_fake, user, NULL));
4118 }
4119
4120 void *
4121 dmu_buf_get_user(dmu_buf_t *db_fake)
4122 {
4123 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4124
4125 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4126 return (db->db_user);
4127 }
4128
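/*
 * Illustrative sketch (hypothetical, not part of this file): attaching
 * a user record to a dbuf. Real consumers (e.g. the dnode and SA
 * layers) embed a dmu_buf_user_t in their own structure, initialize it
 * with dmu_buf_init_user(), and attach it with dmu_buf_set_user().
 */
typedef struct example_user {
	dmu_buf_user_t eu_dbu;	/* must be initialized before attach */
	uint64_t eu_cookie;
} example_user_t;

static void __maybe_unused
example_attach_user(dmu_buf_t *db, example_user_t *eu)
{
	/* No eviction callbacks in this minimal sketch. */
	dmu_buf_init_user(&eu->eu_dbu, NULL, NULL, NULL);
	/* Attaching succeeds only if no user was already set. */
	VERIFY3P(dmu_buf_set_user(db, &eu->eu_dbu), ==, NULL);
}
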
4129 uint64_t
4130 dmu_buf_user_size(dmu_buf_t *db_fake)
4131 {
4132 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4133 if (db->db_user == NULL)
4134 return (0);
4135 return (atomic_load_64(&db->db_user->dbu_size));
4136 }
4137
4138 void
4139 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
4140 {
4141 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4142 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4143 ASSERT3P(db->db_user, !=, NULL);
4144 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
4145 atomic_add_64(&db->db_user->dbu_size, nadd);
4146 }
4147
4148 void
4149 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
4150 {
4151 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4152 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4153 ASSERT3P(db->db_user, !=, NULL);
4154 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
4155 atomic_sub_64(&db->db_user->dbu_size, nsub);
4156 }
4157
4158 void
4159 dmu_buf_user_evict_wait(void)
4160 {
4161 taskq_wait(dbu_evict_taskq);
4162 }
4163
4164 blkptr_t *
4165 dmu_buf_get_blkptr(dmu_buf_t *db)
4166 {
4167 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4168 return (dbi->db_blkptr);
4169 }
4170
4171 objset_t *
4172 dmu_buf_get_objset(dmu_buf_t *db)
4173 {
4174 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4175 return (dbi->db_objset);
4176 }
4177
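/*
 * Ensure db->db_blkptr points at the buffer's physical location:
 * either the spill/blkptr array embedded in the dnode itself, or the
 * appropriate slot inside the parent indirect block (holding the
 * parent first if it has not yet been instantiated).
 */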
4178 static void
4179 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4180 {
4181 	/* ASSERT(dmu_tx_is_syncing(tx)) */
4182 ASSERT(MUTEX_HELD(&db->db_mtx));
4183
4184 if (db->db_blkptr != NULL)
4185 return;
4186
4187 if (db->db_blkid == DMU_SPILL_BLKID) {
4188 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4189 BP_ZERO(db->db_blkptr);
4190 return;
4191 }
4192 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4193 /*
4194 * This buffer was allocated at a time when there was
4195 * no available blkptrs from the dnode, or it was
4196 * inappropriate to hook it in (i.e., nlevels mismatch).
4197 */
4198 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4199 ASSERT(db->db_parent == NULL);
4200 db->db_parent = dn->dn_dbuf;
4201 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4202 DBUF_VERIFY(db);
4203 } else {
4204 dmu_buf_impl_t *parent = db->db_parent;
4205 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4206
4207 ASSERT(dn->dn_phys->dn_nlevels > 1);
4208 if (parent == NULL) {
4209 mutex_exit(&db->db_mtx);
4210 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4211 parent = dbuf_hold_level(dn, db->db_level + 1,
4212 db->db_blkid >> epbs, db);
4213 rw_exit(&dn->dn_struct_rwlock);
4214 mutex_enter(&db->db_mtx);
4215 db->db_parent = parent;
4216 }
4217 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4218 (db->db_blkid & ((1ULL << epbs) - 1));
4219 DBUF_VERIFY(db);
4220 }
4221 }
4222
4223 static void
4224 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4225 {
4226 dmu_buf_impl_t *db = dr->dr_dbuf;
4227 void *data = dr->dt.dl.dr_data;
4228
4229 ASSERT0(db->db_level);
4230 ASSERT(MUTEX_HELD(&db->db_mtx));
4231 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4232 ASSERT(data != NULL);
4233
4234 dnode_t *dn = dr->dr_dnode;
4235 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4236 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4237 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4238
4239 dbuf_sync_leaf_verify_bonus_dnode(dr);
4240
4241 dbuf_undirty_bonus(dr);
4242 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4243 }
4244
4245 /*
4246 * When syncing out a block of dnodes, adjust the block to deal with
4247 * encryption. Normally, we make sure the block is decrypted before writing
4248 * it. If we have crypt params, then we are writing a raw (encrypted) block
4249 * from a raw receive. In this case, set the ARC buf's crypt params so
4250 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4251 */
4252 static void
4253 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4254 {
4255 int err;
4256 dmu_buf_impl_t *db = dr->dr_dbuf;
4257
4258 ASSERT(MUTEX_HELD(&db->db_mtx));
4259 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4260 ASSERT3U(db->db_level, ==, 0);
4261
4262 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4263 zbookmark_phys_t zb;
4264
4265 /*
4266 * Unfortunately, there is currently no mechanism for
4267 * syncing context to handle decryption errors. An error
4268 * here is only possible if an attacker maliciously
4269 * changed a dnode block and updated the associated
4270 * checksums going up the block tree.
4271 */
4272 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4273 db->db.db_object, db->db_level, db->db_blkid);
4274 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4275 &zb, B_TRUE);
4276 if (err)
4277 panic("Invalid dnode block MAC");
4278 } else if (dr->dt.dl.dr_has_raw_params) {
4279 (void) arc_release(dr->dt.dl.dr_data, db);
4280 arc_convert_to_raw(dr->dt.dl.dr_data,
4281 dmu_objset_id(db->db_objset),
4282 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4283 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4284 }
4285 }
4286
4287 /*
4288 * dbuf_sync_indirect() is called recursively from dbuf_sync_list(), so it
4289 * is critical that we not allow the compiler to inline this function into
4290 * dbuf_sync_list(), thereby drastically bloating the stack usage.
4291 */
4292 noinline static void
4293 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4294 {
4295 dmu_buf_impl_t *db = dr->dr_dbuf;
4296 dnode_t *dn = dr->dr_dnode;
4297
4298 ASSERT(dmu_tx_is_syncing(tx));
4299
4300 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4301
4302 mutex_enter(&db->db_mtx);
4303
4304 ASSERT(db->db_level > 0);
4305 DBUF_VERIFY(db);
4306
4307 /* Read the block if it hasn't been read yet. */
4308 if (db->db_buf == NULL) {
4309 mutex_exit(&db->db_mtx);
4310 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4311 mutex_enter(&db->db_mtx);
4312 }
4313 ASSERT3U(db->db_state, ==, DB_CACHED);
4314 ASSERT(db->db_buf != NULL);
4315
4316 /* Indirect block size must match what the dnode thinks it is. */
4317 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4318 dbuf_check_blkptr(dn, db);
4319
4320 /* Provide the pending dirty record to child dbufs */
4321 db->db_data_pending = dr;
4322
4323 mutex_exit(&db->db_mtx);
4324
4325 dbuf_write(dr, db->db_buf, tx);
4326
4327 zio_t *zio = dr->dr_zio;
4328 mutex_enter(&dr->dt.di.dr_mtx);
4329 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4330 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4331 mutex_exit(&dr->dt.di.dr_mtx);
4332 zio_nowait(zio);
4333 }
4334
4335 /*
4336 * Verify that the size of the data in our bonus buffer does not exceed
4337 * its recorded size.
4338 *
4339 * The purpose of this verification is to catch any cases in development
4340 * where the size of a phys structure (e.g., space_map_phys_t) grows and,
4341 * due to incorrect feature management, older pools expect to read more
4342 * data even though they didn't actually write it to begin with.
4343 *
4344 * For example, this would catch an error in the feature logic where we
4345 * open an older pool and we expect to write the space map histogram of
4346 * a space map with size SPACE_MAP_SIZE_V0.
4347 */
4348 static void
4349 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4350 {
4351 #ifdef ZFS_DEBUG
4352 dnode_t *dn = dr->dr_dnode;
4353
4354 /*
4355 * Encrypted bonus buffers can have data past their bonuslen.
4356 * Skip the verification of these blocks.
4357 */
4358 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4359 return;
4360
4361 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4362 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4363 ASSERT3U(bonuslen, <=, maxbonuslen);
4364
4365 arc_buf_t *datap = dr->dt.dl.dr_data;
4366 char *datap_end = ((char *)datap) + bonuslen;
4367 char *datap_max = ((char *)datap) + maxbonuslen;
4368
4369 /* ensure that everything is zero after our data */
4370 for (; datap_end < datap_max; datap_end++)
4371 ASSERT(*datap_end == 0);
4372 #endif
4373 }
4374
4375 static blkptr_t *
4376 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4377 {
4378 /* This must be a lightweight dirty record. */
4379 ASSERT3P(dr->dr_dbuf, ==, NULL);
4380 dnode_t *dn = dr->dr_dnode;
4381
4382 if (dn->dn_phys->dn_nlevels == 1) {
4383 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4384 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4385 } else {
4386 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4387 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4388 VERIFY3U(parent_db->db_level, ==, 1);
4389 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4390 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4391 blkptr_t *bp = parent_db->db.db_data;
4392 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4393 }
4394 }
4395
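/*
 * zio ready callback for a lightweight (dbuf-less) write: charge the
 * space delta to the dnode, extend dn_maxblkid if needed, set the BP's
 * fill count, and publish the new BP into the parent's block pointer
 * array under the parent's rwlock.
 */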
4396 static void
4397 dbuf_lightweight_ready(zio_t *zio)
4398 {
4399 dbuf_dirty_record_t *dr = zio->io_private;
4400 blkptr_t *bp = zio->io_bp;
4401
4402 if (zio->io_error != 0)
4403 return;
4404
4405 dnode_t *dn = dr->dr_dnode;
4406
4407 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4408 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4409 int64_t delta = bp_get_dsize_sync(spa, bp) -
4410 bp_get_dsize_sync(spa, bp_orig);
4411 dnode_diduse_space(dn, delta);
4412
4413 uint64_t blkid = dr->dt.dll.dr_blkid;
4414 mutex_enter(&dn->dn_mtx);
4415 if (blkid > dn->dn_phys->dn_maxblkid) {
4416 ASSERT0(dn->dn_objset->os_raw_receive);
4417 dn->dn_phys->dn_maxblkid = blkid;
4418 }
4419 mutex_exit(&dn->dn_mtx);
4420
4421 if (!BP_IS_EMBEDDED(bp)) {
4422 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4423 BP_SET_FILL(bp, fill);
4424 }
4425
4426 dmu_buf_impl_t *parent_db;
4427 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4428 if (dr->dr_parent == NULL) {
4429 parent_db = dn->dn_dbuf;
4430 } else {
4431 parent_db = dr->dr_parent->dr_dbuf;
4432 }
4433 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4434 *bp_orig = *bp;
4435 rw_exit(&parent_db->db_rwlock);
4436 }
4437
4438 static void
4439 dbuf_lightweight_done(zio_t *zio)
4440 {
4441 dbuf_dirty_record_t *dr = zio->io_private;
4442
4443 VERIFY0(zio->io_error);
4444
4445 objset_t *os = dr->dr_dnode->dn_objset;
4446 dmu_tx_t *tx = os->os_synctx;
4447
4448 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4449 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4450 } else {
4451 dsl_dataset_t *ds = os->os_dsl_dataset;
4452 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4453 dsl_dataset_block_born(ds, zio->io_bp, tx);
4454 }
4455
4456 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4457 zio->io_txg);
4458
4459 abd_free(dr->dt.dll.dr_abd);
4460 kmem_free(dr, sizeof (*dr));
4461 }
4462
4463 noinline static void
4464 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4465 {
4466 dnode_t *dn = dr->dr_dnode;
4467 zio_t *pio;
4468 if (dn->dn_phys->dn_nlevels == 1) {
4469 pio = dn->dn_zio;
4470 } else {
4471 pio = dr->dr_parent->dr_zio;
4472 }
4473
4474 zbookmark_phys_t zb = {
4475 .zb_objset = dmu_objset_id(dn->dn_objset),
4476 .zb_object = dn->dn_object,
4477 .zb_level = 0,
4478 .zb_blkid = dr->dt.dll.dr_blkid,
4479 };
4480
4481 /*
4482 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4483 * will have the old BP in dbuf_lightweight_done().
4484 */
4485 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4486
4487 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4488 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4489 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4490 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4491 dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4492 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4493
4494 zio_nowait(dr->dr_zio);
4495 }
4496
4497 /*
4498 * dbuf_sync_leaf() is called recursively from dbuf_sync_list(), so it is
4499 * critical that we not allow the compiler to inline this function into
4500 * dbuf_sync_list(), thereby drastically bloating the stack usage.
4501 */
4502 noinline static void
4503 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4504 {
4505 arc_buf_t **datap = &dr->dt.dl.dr_data;
4506 dmu_buf_impl_t *db = dr->dr_dbuf;
4507 dnode_t *dn = dr->dr_dnode;
4508 objset_t *os;
4509 uint64_t txg = tx->tx_txg;
4510
4511 ASSERT(dmu_tx_is_syncing(tx));
4512
4513 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4514
4515 mutex_enter(&db->db_mtx);
4516 /*
4517 * To be synced, we must be dirtied. But we
4518 	 * might have been freed after being dirtied.
4519 */
4520 if (db->db_state == DB_UNCACHED) {
4521 /* This buffer has been freed since it was dirtied */
4522 ASSERT(db->db.db_data == NULL);
4523 } else if (db->db_state == DB_FILL) {
4524 /* This buffer was freed and is now being re-filled */
4525 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4526 } else if (db->db_state == DB_READ) {
4527 /*
4528 * This buffer has a clone we need to write, and an in-flight
4529 		 * read on the BP we're about to clone. It's safe to issue the
4530 * write here because the read has already been issued and the
4531 * contents won't change.
4532 */
4533 ASSERT(dr->dt.dl.dr_brtwrite &&
4534 dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
4535 } else {
4536 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4537 }
4538 DBUF_VERIFY(db);
4539
4540 if (db->db_blkid == DMU_SPILL_BLKID) {
4541 mutex_enter(&dn->dn_mtx);
4542 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4543 /*
4544 * In the previous transaction group, the bonus buffer
4545 * was entirely used to store the attributes for the
4546 * dnode which overrode the dn_spill field. However,
4547 * when adding more attributes to the file a spill
4548 * block was required to hold the extra attributes.
4549 *
4550 * Make sure to clear the garbage left in the dn_spill
4551 * field from the previous attributes in the bonus
4552 * buffer. Otherwise, after writing out the spill
4553 * block to the new allocated dva, it will free
4554 * the old block pointed to by the invalid dn_spill.
4555 */
4556 db->db_blkptr = NULL;
4557 }
4558 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4559 mutex_exit(&dn->dn_mtx);
4560 }
4561
4562 /*
4563 * If this is a bonus buffer, simply copy the bonus data into the
4564 * dnode. It will be written out when the dnode is synced (and it
4565 * will be synced, since it must have been dirty for dbuf_sync to
4566 * be called).
4567 */
4568 if (db->db_blkid == DMU_BONUS_BLKID) {
4569 ASSERT(dr->dr_dbuf == db);
4570 dbuf_sync_bonus(dr, tx);
4571 return;
4572 }
4573
4574 os = dn->dn_objset;
4575
4576 /*
4577 * This function may have dropped the db_mtx lock allowing a dmu_sync
4578 * operation to sneak in. As a result, we need to ensure that we
4579 * don't check the dr_override_state until we have returned from
4580 * dbuf_check_blkptr.
4581 */
4582 dbuf_check_blkptr(dn, db);
4583
4584 /*
4585 * If this buffer is in the middle of an immediate write,
4586 * wait for the synchronous IO to complete.
4587 */
4588 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4589 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4590 cv_wait(&db->db_changed, &db->db_mtx);
4591 }
4592
4593 /*
4594 * If this is a dnode block, ensure it is appropriately encrypted
4595 * or decrypted, depending on what we are writing to it this txg.
4596 */
4597 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4598 dbuf_prepare_encrypted_dnode_leaf(dr);
4599
4600 if (db->db_state != DB_NOFILL &&
4601 dn->dn_object != DMU_META_DNODE_OBJECT &&
4602 zfs_refcount_count(&db->db_holds) > 1 &&
4603 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4604 *datap == db->db_buf) {
4605 /*
4606 * If this buffer is currently "in use" (i.e., there
4607 * are active holds and db_data still references it),
4608 * then make a copy before we start the write so that
4609 * any modifications from the open txg will not leak
4610 * into this write.
4611 *
4612 * NOTE: this copy does not need to be made for
4613 * objects only modified in the syncing context (e.g.
4614 		 * the meta-dnode's blocks).
4615 */
4616 int psize = arc_buf_size(*datap);
4617 int lsize = arc_buf_lsize(*datap);
4618 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4619 enum zio_compress compress_type = arc_get_compression(*datap);
4620 uint8_t complevel = arc_get_complevel(*datap);
4621
4622 if (arc_is_encrypted(*datap)) {
4623 boolean_t byteorder;
4624 uint8_t salt[ZIO_DATA_SALT_LEN];
4625 uint8_t iv[ZIO_DATA_IV_LEN];
4626 uint8_t mac[ZIO_DATA_MAC_LEN];
4627
4628 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4629 *datap = arc_alloc_raw_buf(os->os_spa, db,
4630 dmu_objset_id(os), byteorder, salt, iv, mac,
4631 dn->dn_type, psize, lsize, compress_type,
4632 complevel);
4633 } else if (compress_type != ZIO_COMPRESS_OFF) {
4634 ASSERT3U(type, ==, ARC_BUFC_DATA);
4635 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4636 psize, lsize, compress_type, complevel);
4637 } else {
4638 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4639 }
4640 memcpy((*datap)->b_data, db->db.db_data, psize);
4641 }
4642 db->db_data_pending = dr;
4643
4644 mutex_exit(&db->db_mtx);
4645
4646 dbuf_write(dr, *datap, tx);
4647
4648 ASSERT(!list_link_active(&dr->dr_dirty_node));
4649 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4650 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4651 } else {
4652 zio_nowait(dr->dr_zio);
4653 }
4654 }
4655
4656 /*
4657 * Syncs out a range of dirty records for indirect or leaf dbufs. May be
4658 * called recursively from dbuf_sync_indirect().
4659 */
4660 void
4661 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4662 {
4663 dbuf_dirty_record_t *dr;
4664
4665 while ((dr = list_head(list))) {
4666 if (dr->dr_zio != NULL) {
4667 /*
4668 * If we find an already initialized zio then we
4669 * are processing the meta-dnode, and we have finished.
4670 * The dbufs for all dnodes are put back on the list
4671 * during processing, so that we can zio_wait()
4672 * these IOs after initiating all child IOs.
4673 */
4674 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4675 DMU_META_DNODE_OBJECT);
4676 break;
4677 }
4678 list_remove(list, dr);
4679 if (dr->dr_dbuf == NULL) {
4680 dbuf_sync_lightweight(dr, tx);
4681 } else {
4682 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4683 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4684 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4685 }
4686 if (dr->dr_dbuf->db_level > 0)
4687 dbuf_sync_indirect(dr, tx);
4688 else
4689 dbuf_sync_leaf(dr, tx);
4690 }
4691 }
4692 }
4693
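/*
 * arc_write ready callback for ordinary dbuf writes: account for the
 * space consumed by the new BP, compute its fill count, and copy it
 * into the in-memory parent (the dnode or the parent indirect block)
 * under the parent's rwlock.
 */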
4694 static void
4695 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4696 {
4697 (void) buf;
4698 dmu_buf_impl_t *db = vdb;
4699 dnode_t *dn;
4700 blkptr_t *bp = zio->io_bp;
4701 blkptr_t *bp_orig = &zio->io_bp_orig;
4702 spa_t *spa = zio->io_spa;
4703 int64_t delta;
4704 uint64_t fill = 0;
4705 int i;
4706
4707 ASSERT3P(db->db_blkptr, !=, NULL);
4708 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4709
4710 DB_DNODE_ENTER(db);
4711 dn = DB_DNODE(db);
4712 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4713 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4714 zio->io_prev_space_delta = delta;
4715
4716 if (BP_GET_LOGICAL_BIRTH(bp) != 0) {
4717 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4718 BP_GET_TYPE(bp) == dn->dn_type) ||
4719 (db->db_blkid == DMU_SPILL_BLKID &&
4720 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4721 BP_IS_EMBEDDED(bp));
4722 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4723 }
4724
4725 mutex_enter(&db->db_mtx);
4726
4727 #ifdef ZFS_DEBUG
4728 if (db->db_blkid == DMU_SPILL_BLKID) {
4729 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4730 ASSERT(!(BP_IS_HOLE(bp)) &&
4731 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4732 }
4733 #endif
4734
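	/*
	 * Compute the BP's fill count: for a block of dnodes, the
	 * number of allocated dnodes it contains; for an indirect
	 * block, the sum of its children's fill counts; for an
	 * ordinary data block, 1 (or 0 for a hole).
	 */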
4735 if (db->db_level == 0) {
4736 mutex_enter(&dn->dn_mtx);
4737 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4738 db->db_blkid != DMU_SPILL_BLKID) {
4739 ASSERT0(db->db_objset->os_raw_receive);
4740 dn->dn_phys->dn_maxblkid = db->db_blkid;
4741 }
4742 mutex_exit(&dn->dn_mtx);
4743
4744 if (dn->dn_type == DMU_OT_DNODE) {
4745 i = 0;
4746 while (i < db->db.db_size) {
4747 dnode_phys_t *dnp =
4748 (void *)(((char *)db->db.db_data) + i);
4749
4750 i += DNODE_MIN_SIZE;
4751 if (dnp->dn_type != DMU_OT_NONE) {
4752 fill++;
4753 for (int j = 0; j < dnp->dn_nblkptr;
4754 j++) {
4755 (void) zfs_blkptr_verify(spa,
4756 &dnp->dn_blkptr[j],
4757 BLK_CONFIG_SKIP,
4758 BLK_VERIFY_HALT);
4759 }
4760 if (dnp->dn_flags &
4761 DNODE_FLAG_SPILL_BLKPTR) {
4762 (void) zfs_blkptr_verify(spa,
4763 DN_SPILL_BLKPTR(dnp),
4764 BLK_CONFIG_SKIP,
4765 BLK_VERIFY_HALT);
4766 }
4767 i += dnp->dn_extra_slots *
4768 DNODE_MIN_SIZE;
4769 }
4770 }
4771 } else {
4772 if (BP_IS_HOLE(bp)) {
4773 fill = 0;
4774 } else {
4775 fill = 1;
4776 }
4777 }
4778 } else {
4779 blkptr_t *ibp = db->db.db_data;
4780 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4781 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4782 if (BP_IS_HOLE(ibp))
4783 continue;
4784 (void) zfs_blkptr_verify(spa, ibp,
4785 BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4786 fill += BP_GET_FILL(ibp);
4787 }
4788 }
4789 DB_DNODE_EXIT(db);
4790
4791 if (!BP_IS_EMBEDDED(bp))
4792 BP_SET_FILL(bp, fill);
4793
4794 mutex_exit(&db->db_mtx);
4795
4796 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4797 *db->db_blkptr = *bp;
4798 dmu_buf_unlock_parent(db, dblt, FTAG);
4799 }
4800
4801 /*
4802 * This function gets called just prior to running through the compression
4803 * stage of the zio pipeline. If we're an indirect block composed of only
4804 * holes, then we want this indirect to be compressed away to a hole. In
4805 * order to do that we must zero out any information about the holes that
4806 * this indirect points to before we try to compress it.
4807 */
4808 static void
4809 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4810 {
4811 (void) zio, (void) buf;
4812 dmu_buf_impl_t *db = vdb;
4813 dnode_t *dn;
4814 blkptr_t *bp;
4815 unsigned int epbs, i;
4816
4817 ASSERT3U(db->db_level, >, 0);
4818 DB_DNODE_ENTER(db);
4819 dn = DB_DNODE(db);
4820 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4821 ASSERT3U(epbs, <, 31);
4822
4823 /* Determine if all our children are holes */
4824 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4825 if (!BP_IS_HOLE(bp))
4826 break;
4827 }
4828
4829 /*
4830 * If all the children are holes, then zero them all out so that
4831 * we may get compressed away.
4832 */
4833 if (i == 1ULL << epbs) {
4834 /*
4835 * We only found holes. Grab the rwlock to prevent
4836 * anybody from reading the blocks we're about to
4837 * zero out.
4838 */
4839 rw_enter(&db->db_rwlock, RW_WRITER);
4840 memset(db->db.db_data, 0, db->db.db_size);
4841 rw_exit(&db->db_rwlock);
4842 }
4843 DB_DNODE_EXIT(db);
4844 }
4845
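/*
 * arc_write done callback: finish the dataset block accounting (unless
 * this was a nopwrite or rewrite), detach and free the dirty record,
 * and drop the hold the dirty record had on the dbuf.
 */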
4846 static void
4847 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4848 {
4849 (void) buf;
4850 dmu_buf_impl_t *db = vdb;
4851 blkptr_t *bp_orig = &zio->io_bp_orig;
4852 blkptr_t *bp = db->db_blkptr;
4853 objset_t *os = db->db_objset;
4854 dmu_tx_t *tx = os->os_synctx;
4855
4856 ASSERT0(zio->io_error);
4857 ASSERT(db->db_blkptr == bp);
4858
4859 /*
4860 * For nopwrites and rewrites we ensure that the bp matches our
4861 * original and bypass all the accounting.
4862 */
4863 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4864 ASSERT(BP_EQUAL(bp, bp_orig));
4865 } else {
4866 dsl_dataset_t *ds = os->os_dsl_dataset;
4867 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4868 dsl_dataset_block_born(ds, bp, tx);
4869 }
4870
4871 mutex_enter(&db->db_mtx);
4872
4873 DBUF_VERIFY(db);
4874
4875 dbuf_dirty_record_t *dr = db->db_data_pending;
4876 dnode_t *dn = dr->dr_dnode;
4877 ASSERT(!list_link_active(&dr->dr_dirty_node));
4878 ASSERT(dr->dr_dbuf == db);
4879 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4880 list_remove(&db->db_dirty_records, dr);
4881
4882 #ifdef ZFS_DEBUG
4883 if (db->db_blkid == DMU_SPILL_BLKID) {
4884 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4885 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4886 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4887 }
4888 #endif
4889
4890 if (db->db_level == 0) {
4891 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4892 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4893 if (db->db_state != DB_NOFILL) {
4894 if (dr->dt.dl.dr_data != NULL &&
4895 dr->dt.dl.dr_data != db->db_buf) {
4896 arc_buf_destroy(dr->dt.dl.dr_data, db);
4897 }
4898 }
4899 } else {
4900 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4901 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4902 if (!BP_IS_HOLE(db->db_blkptr)) {
4903 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4904 SPA_BLKPTRSHIFT;
4905 ASSERT3U(db->db_blkid, <=,
4906 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4907 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4908 db->db.db_size);
4909 }
4910 mutex_destroy(&dr->dt.di.dr_mtx);
4911 list_destroy(&dr->dt.di.dr_children);
4912 }
4913
4914 cv_broadcast(&db->db_changed);
4915 ASSERT(db->db_dirtycnt > 0);
4916 db->db_dirtycnt -= 1;
4917 db->db_data_pending = NULL;
4918 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4919
4920 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4921 zio->io_txg);
4922
4923 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4924 }
4925
4926 static void
4927 dbuf_write_nofill_ready(zio_t *zio)
4928 {
4929 dbuf_write_ready(zio, NULL, zio->io_private);
4930 }
4931
4932 static void
4933 dbuf_write_nofill_done(zio_t *zio)
4934 {
4935 dbuf_write_done(zio, NULL, zio->io_private);
4936 }
4937
4938 static void
4939 dbuf_write_override_ready(zio_t *zio)
4940 {
4941 dbuf_dirty_record_t *dr = zio->io_private;
4942 dmu_buf_impl_t *db = dr->dr_dbuf;
4943
4944 dbuf_write_ready(zio, NULL, db);
4945 }
4946
4947 static void
4948 dbuf_write_override_done(zio_t *zio)
4949 {
4950 dbuf_dirty_record_t *dr = zio->io_private;
4951 dmu_buf_impl_t *db = dr->dr_dbuf;
4952 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4953
4954 mutex_enter(&db->db_mtx);
4955 if (!BP_EQUAL(zio->io_bp, obp)) {
4956 if (!BP_IS_HOLE(obp))
4957 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4958 arc_release(dr->dt.dl.dr_data, db);
4959 }
4960 mutex_exit(&db->db_mtx);
4961
4962 dbuf_write_done(zio, NULL, db);
4963
4964 if (zio->io_abd != NULL)
4965 abd_free(zio->io_abd);
4966 }
4967
4968 typedef struct dbuf_remap_impl_callback_arg {
4969 objset_t *drica_os;
4970 uint64_t drica_blk_birth;
4971 dmu_tx_t *drica_tx;
4972 } dbuf_remap_impl_callback_arg_t;
4973
4974 static void
4975 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4976 void *arg)
4977 {
4978 dbuf_remap_impl_callback_arg_t *drica = arg;
4979 objset_t *os = drica->drica_os;
4980 spa_t *spa = dmu_objset_spa(os);
4981 dmu_tx_t *tx = drica->drica_tx;
4982
4983 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4984
4985 if (os == spa_meta_objset(spa)) {
4986 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4987 } else {
4988 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4989 size, drica->drica_blk_birth, tx);
4990 }
4991 }
4992
4993 static void
4994 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4995 {
4996 blkptr_t bp_copy = *bp;
4997 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4998 dbuf_remap_impl_callback_arg_t drica;
4999
5000 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5001
5002 drica.drica_os = dn->dn_objset;
5003 drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp);
5004 drica.drica_tx = tx;
5005 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
5006 &drica)) {
5007 /*
5008 * If the blkptr being remapped is tracked by a livelist,
5009 * then we need to make sure the livelist reflects the update.
5010 * First, cancel out the old blkptr by appending a 'FREE'
5011 * entry. Next, add an 'ALLOC' to track the new version. This
5012 * way we avoid trying to free an inaccurate blkptr at delete.
5013 * Note that embedded blkptrs are not tracked in livelists.
5014 */
5015 if (dn->dn_objset != spa_meta_objset(spa)) {
5016 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
5017 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
5018 BP_GET_LOGICAL_BIRTH(bp) >
5019 ds->ds_dir->dd_origin_txg) {
5020 ASSERT(!BP_IS_EMBEDDED(bp));
5021 ASSERT(dsl_dir_is_clone(ds->ds_dir));
5022 ASSERT(spa_feature_is_enabled(spa,
5023 SPA_FEATURE_LIVELIST));
5024 bplist_append(&ds->ds_dir->dd_pending_frees,
5025 bp);
5026 bplist_append(&ds->ds_dir->dd_pending_allocs,
5027 &bp_copy);
5028 }
5029 }
5030
5031 /*
5032 * The db_rwlock prevents dbuf_read_impl() from
5033 * dereferencing the BP while we are changing it. To
5034 * avoid lock contention, only grab it when we are actually
5035 * changing the BP.
5036 */
5037 if (rw != NULL)
5038 rw_enter(rw, RW_WRITER);
5039 *bp = bp_copy;
5040 if (rw != NULL)
5041 rw_exit(rw);
5042 }
5043 }
5044
5045 /*
5046 * Remap any existing BP's to concrete vdevs, if possible.
5047 */
5048 static void
5049 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
5050 {
5051 spa_t *spa = dmu_objset_spa(db->db_objset);
5052 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5053
5054 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
5055 return;
5056
5057 if (db->db_level > 0) {
5058 blkptr_t *bp = db->db.db_data;
5059 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
5060 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
5061 }
5062 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
5063 dnode_phys_t *dnp = db->db.db_data;
5064 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
5065 DMU_OT_DNODE);
5066 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
5067 i += dnp[i].dn_extra_slots + 1) {
5068 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
5069 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
5070 &dn->dn_dbuf->db_rwlock);
5071 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
5072 tx);
5073 }
5074 }
5075 }
5076 }
5077
5078
5079 /*
5080 * Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
5081 * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio).
5082 */
5083 static void
5084 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5085 {
5086 dmu_buf_impl_t *db = dr->dr_dbuf;
5087 dnode_t *dn = dr->dr_dnode;
5088 objset_t *os;
5089 dmu_buf_impl_t *parent = db->db_parent;
5090 uint64_t txg = tx->tx_txg;
5091 zbookmark_phys_t zb;
5092 zio_prop_t zp;
5093 zio_t *pio; /* parent I/O */
5094 int wp_flag = 0;
5095
5096 ASSERT(dmu_tx_is_syncing(tx));
5097
5098 os = dn->dn_objset;
5099
5100 if (db->db_state != DB_NOFILL) {
5101 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5102 /*
5103 * Private object buffers are released here rather
5104 * than in dbuf_dirty() since they are only modified
5105 * in the syncing context and we don't want the
5106 * overhead of making multiple copies of the data.
5107 */
5108 if (BP_IS_HOLE(db->db_blkptr)) {
5109 arc_buf_thaw(data);
5110 } else {
5111 dbuf_release_bp(db);
5112 }
5113 dbuf_remap(dn, db, tx);
5114 }
5115 }
5116
5117 if (parent != dn->dn_dbuf) {
5118 /* Our parent is an indirect block. */
5119 /* We have a dirty parent that has been scheduled for write. */
5120 ASSERT(parent && parent->db_data_pending);
5121 /* Our parent's buffer is one level closer to the dnode. */
5122 ASSERT(db->db_level == parent->db_level-1);
5123 /*
5124 * We're about to modify our parent's db_data by modifying
5125 * our block pointer, so the parent must be released.
5126 */
5127 ASSERT(arc_released(parent->db_buf));
5128 pio = parent->db_data_pending->dr_zio;
5129 } else {
5130 /* Our parent is the dnode itself. */
5131 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5132 db->db_blkid != DMU_SPILL_BLKID) ||
5133 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5134 if (db->db_blkid != DMU_SPILL_BLKID)
5135 ASSERT3P(db->db_blkptr, ==,
5136 &dn->dn_phys->dn_blkptr[db->db_blkid]);
5137 pio = dn->dn_zio;
5138 }
5139
5140 ASSERT(db->db_level == 0 || data == db->db_buf);
5141 ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg);
5142 ASSERT(pio);
5143
5144 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5145 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5146 db->db.db_object, db->db_level, db->db_blkid);
5147
5148 if (db->db_blkid == DMU_SPILL_BLKID)
5149 wp_flag = WP_SPILL;
5150 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5151
5152 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5153
5154 /*
5155 * We copy the blkptr now (rather than when we instantiate the dirty
5156 * record), because its value can change between open context and
5157 * syncing context. We do not need to hold dn_struct_rwlock to read
5158 * db_blkptr because we are in syncing context.
5159 */
5160 dr->dr_bp_copy = *db->db_blkptr;
5161
5162 if (db->db_level == 0 &&
5163 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5164 /*
5165 * The BP for this block has been provided by open context
5166 * (by dmu_sync() or dmu_buf_write_embedded()).
5167 */
5168 abd_t *contents = (data != NULL) ?
5169 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5170
5171 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5172 contents, db->db.db_size, db->db.db_size, &zp,
5173 dbuf_write_override_ready, NULL,
5174 dbuf_write_override_done,
5175 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5176 mutex_enter(&db->db_mtx);
5177 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5178 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5179 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5180 dr->dt.dl.dr_brtwrite);
5181 mutex_exit(&db->db_mtx);
5182 } else if (db->db_state == DB_NOFILL) {
5183 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5184 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5185 dr->dr_zio = zio_write(pio, os->os_spa, txg,
5186 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5187 dbuf_write_nofill_ready, NULL,
5188 dbuf_write_nofill_done, db,
5189 ZIO_PRIORITY_ASYNC_WRITE,
5190 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5191 } else {
5192 ASSERT(arc_released(data));
5193
5194 /*
5195 		 * For indirect blocks, we want to set up the children
5196 * ready callback so that we can properly handle an indirect
5197 * block that only contains holes.
5198 */
5199 arc_write_done_func_t *children_ready_cb = NULL;
5200 if (db->db_level != 0)
5201 children_ready_cb = dbuf_write_children_ready;
5202
5203 dr->dr_zio = arc_write(pio, os->os_spa, txg,
5204 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5205 dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
5206 children_ready_cb, dbuf_write_done, db,
5207 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5208 }
5209 }
5210
5211 EXPORT_SYMBOL(dbuf_find);
5212 EXPORT_SYMBOL(dbuf_is_metadata);
5213 EXPORT_SYMBOL(dbuf_destroy);
5214 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5215 EXPORT_SYMBOL(dbuf_whichblock);
5216 EXPORT_SYMBOL(dbuf_read);
5217 EXPORT_SYMBOL(dbuf_unoverride);
5218 EXPORT_SYMBOL(dbuf_free_range);
5219 EXPORT_SYMBOL(dbuf_new_size);
5220 EXPORT_SYMBOL(dbuf_release_bp);
5221 EXPORT_SYMBOL(dbuf_dirty);
5222 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5223 EXPORT_SYMBOL(dmu_buf_will_dirty);
5224 EXPORT_SYMBOL(dmu_buf_is_dirty);
5225 EXPORT_SYMBOL(dmu_buf_will_clone);
5226 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5227 EXPORT_SYMBOL(dmu_buf_will_fill);
5228 EXPORT_SYMBOL(dmu_buf_fill_done);
5229 EXPORT_SYMBOL(dmu_buf_rele);
5230 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5231 EXPORT_SYMBOL(dbuf_prefetch);
5232 EXPORT_SYMBOL(dbuf_hold_impl);
5233 EXPORT_SYMBOL(dbuf_hold);
5234 EXPORT_SYMBOL(dbuf_hold_level);
5235 EXPORT_SYMBOL(dbuf_create_bonus);
5236 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5237 EXPORT_SYMBOL(dbuf_rm_spill);
5238 EXPORT_SYMBOL(dbuf_add_ref);
5239 EXPORT_SYMBOL(dbuf_rele);
5240 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5241 EXPORT_SYMBOL(dbuf_refcount);
5242 EXPORT_SYMBOL(dbuf_sync_list);
5243 EXPORT_SYMBOL(dmu_buf_set_user);
5244 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5245 EXPORT_SYMBOL(dmu_buf_get_user);
5246 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5247
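/*
 * The tunables below are exposed as module parameters under the
 * dbuf_cache_ and dbuf_ prefixes (on Linux they appear beneath
 * /sys/module/zfs/parameters/, e.g. dbuf_cache_max_bytes).
 */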
5248 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5249 "Maximum size in bytes of the dbuf cache.");
5250
5251 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5252 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5253
5254 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5255 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5256
5257 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5258 "Maximum size in bytes of dbuf metadata cache.");
5259
5260 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5261 "Set size of dbuf cache to log2 fraction of arc size.");
5262
5263 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5264 "Set size of dbuf metadata cache to log2 fraction of arc size.");
5265
5266 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5267 "Set size of dbuf cache mutex array as log2 shift.");