/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_dbuf.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>
#include <sys/spa_impl.h>

kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "cache_target_bytes", KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_total_evicts", KSTAT_DATA_UINT64 },
	{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
	{ "hash_hits", KSTAT_DATA_UINT64 },
	{ "hash_misses", KSTAT_DATA_UINT64 },
	{ "hash_collisions", KSTAT_DATA_UINT64 },
	{ "hash_elements", KSTAT_DATA_UINT64 },
	{ "hash_elements_max", KSTAT_DATA_UINT64 },
	{ "hash_chains", KSTAT_DATA_UINT64 },
	{ "hash_chain_max", KSTAT_DATA_UINT64 },
	{ "hash_insert_race", KSTAT_DATA_UINT64 },
	{ "metadata_cache_count", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};

#define	DBUF_STAT_INCR(stat, val)	\
	atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;			\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;		\
}
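
/*
 * For example, dbuf_hash_insert() calls DBUF_STAT_MAX(hash_chain_max, i)
 * with the hash-chain length it just walked; the CAS loop above re-reads
 * the stored maximum and retries until either v no longer exceeds it or
 * the update wins the race with a concurrent caller, so no lock is needed
 * to maintain these kstat maxima.
 */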

typedef struct dbuf_hold_arg {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	boolean_t dh_fail_sparse;
	boolean_t dh_fail_uncached;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
} dbuf_hold_arg_t;

static dbuf_hold_arg_t *dbuf_hold_arg_create(dnode_t *dn, uint8_t level,
    uint64_t blkid, boolean_t fail_sparse, boolean_t fail_uncached,
    void *tag, dmu_buf_impl_t **dbp);
static int dbuf_hold_impl_arg(dbuf_hold_arg_t *dh);
static void dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh);

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t *cache;
	zfs_refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
unsigned long dbuf_cache_max_bytes = 0;
unsigned long dbuf_metadata_cache_max_bytes = 0;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
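
/*
 * For example, with a cache target of 100 MiB and the default
 * dbuf_cache_hiwater_pct / dbuf_cache_lowater_pct of 10, dbuf_evict_notify()
 * signals the eviction thread once the cache exceeds the 100 MiB mid water,
 * the thread evicts down to the 90 MiB low water mark, and callers begin
 * evicting directly once the cache grows past the 110 MiB high water mark.
 */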

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

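/*
 * Constructor and destructor for the dmu_buf_impl_t kmem cache: dbuf_cons()
 * zeroes the dbuf and sets up its mutex, condition variable, cache link, and
 * hold refcount; dbuf_dest() verifies the dbuf is off the cache list and
 * tears the same state back down.
 */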
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);
	DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift);
}
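
/*
 * For example, with a 32 GiB ARC target and the default dbuf_cache_shift
 * of 5, the computed target is 1 GiB (1/32nd of the ARC target), capped at
 * the dbuf_cache_max_bytes module parameter if that is smaller.
 */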

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_hiwater_bytes());
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_MAX(cache_size_bytes_max,
		    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size));
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_sig_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_target_bytes()) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	} else {
		ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
		ds->cache_size_bytes.value.ui64 =
		    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
		ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
		ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
		ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
		ds->hash_elements.value.ui64 = dbuf_hash_count;
	}

	return (0);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;
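	/*
	 * For example, on a system with 16 GiB of physical memory and the
	 * default 8K zfs_arc_average_blocksize, the loop above stops at
	 * hsize = 2^21 (2M buckets), i.e. a 16 MiB table of 8-byte pointers.
	 */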

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * Setup the parameters for the dbuf caches. We set the sizes of the
	 * dbuf cache and the metadata cache to 1/32nd and 1/64th (default)
	 * of the target size of the ARC. If the values have been specified as
	 * a module option and they're not greater than the target size of the
	 * ARC, then we honor that value.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_target_bytes()) {
		dbuf_cache_max_bytes = arc_target_bytes() >> dbuf_cache_shift;
	}
	if (dbuf_metadata_cache_max_bytes == 0 ||
	    dbuf_metadata_cache_max_bytes >= arc_target_bytes()) {
		dbuf_metadata_cache_max_bytes =
		    arc_target_bytes() >> dbuf_metadata_cache_shift;
	}

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		dbuf_caches[dcs].cache =
		    multilist_create(sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);

		for (i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
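
		/*
		 * For example, with 128K data blocks and 128K indirect
		 * blocks (datablkshift = indblkshift = 17, and 128-byte
		 * block pointers, so SPA_BLKPTRSHIFT = 7), each level 1
		 * block maps 2^(17 + 1*(17-7)) = 128 MiB, and the level 1
		 * blkid for an offset is simply offset >> 27.
		 */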
		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!os->os_encrypted || os->os_raw_receive ||
	    (flags & DB_RF_NO_DECRYPT) != 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		/*
		 * The bonus length stored in the dnode may be less than
		 * the maximum available space in the bonus buffer.
		 */
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);

		/* if the underlying dnode block is encrypted, decrypt it */
		err = dbuf_read_verify_dnode_crypt(db, flags);
		if (err != 0) {
			DB_DNODE_EXIT(db);
			mutex_exit(&db->db_mtx);
			return (err);
		}

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
		arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
		if (bonuslen < max_bonuslen)
			bzero(db->db.db_data, max_bonuslen);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		DB_DNODE_EXIT(db);
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EIO));
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0) {
		DB_DNODE_EXIT(db);
		mutex_exit(&db->db_mtx);
		return (err);
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;

	err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);

	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}
1484
34dc7c2f
BB
1485int
1486dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1487{
1488 int err = 0;
b0bc7a84 1489 boolean_t prefetch;
572e2857 1490 dnode_t *dn;
34dc7c2f
BB
1491
1492 /*
1493 * We don't have to hold the mutex to check db_state because it
1494 * can't be freed while we have a hold on the buffer.
1495 */
424fd7c3 1496 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
34dc7c2f 1497
b128c09f 1498 if (db->db_state == DB_NOFILL)
2e528b49 1499 return (SET_ERROR(EIO));
b128c09f 1500
572e2857
BB
1501 DB_DNODE_ENTER(db);
1502 dn = DB_DNODE(db);
34dc7c2f 1503 if ((flags & DB_RF_HAVESTRUCT) == 0)
572e2857 1504 rw_enter(&dn->dn_struct_rwlock, RW_READER);
34dc7c2f 1505
428870ff 1506 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
572e2857 1507 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
b128c09f 1508 DBUF_IS_CACHEABLE(db);
34dc7c2f
BB
1509
1510 mutex_enter(&db->db_mtx);
1511 if (db->db_state == DB_CACHED) {
b5256303
TC
1512 spa_t *spa = dn->dn_objset->os_spa;
1513
2aa34383 1514 /*
69830602
TC
1515 * Ensure that this block's dnode has been decrypted if
1516 * the caller has requested decrypted data.
2aa34383 1517 */
69830602
TC
1518 err = dbuf_read_verify_dnode_crypt(db, flags);
1519
1520 /*
1521 * If the arc buf is compressed or encrypted and the caller
1522 * requested uncompressed data, we need to untransform it
1523 * before returning. We also call arc_untransform() on any
1524 * unauthenticated blocks, which will verify their MAC if
1525 * the key is now available.
1526 */
1527 if (err == 0 && db->db_buf != NULL &&
1528 (flags & DB_RF_NO_DECRYPT) == 0 &&
b5256303 1529 (arc_is_encrypted(db->db_buf) ||
69830602 1530 arc_is_unauthenticated(db->db_buf) ||
b5256303 1531 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
a2c2ed1b
TC
1532 zbookmark_phys_t zb;
1533
1534 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1535 db->db.db_object, db->db_level, db->db_blkid);
b5256303 1536 dbuf_fix_old_data(db, spa_syncing_txg(spa));
a2c2ed1b 1537 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
2aa34383
DK
1538 dbuf_set_data(db, db->db_buf);
1539 }
34dc7c2f 1540 mutex_exit(&db->db_mtx);
69830602 1541 if (err == 0 && prefetch)
755065f3 1542 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
34dc7c2f 1543 if ((flags & DB_RF_HAVESTRUCT) == 0)
572e2857
BB
1544 rw_exit(&dn->dn_struct_rwlock);
1545 DB_DNODE_EXIT(db);
5e021f56 1546 DBUF_STAT_BUMP(hash_hits);
34dc7c2f 1547 } else if (db->db_state == DB_UNCACHED) {
572e2857 1548 spa_t *spa = dn->dn_objset->os_spa;
a0043383 1549 boolean_t need_wait = B_FALSE;
572e2857 1550
b0319c1f 1551 if (zio == NULL &&
a0043383 1552 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
572e2857 1553 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
a0043383
MA
1554 need_wait = B_TRUE;
1555 }
7f60329a 1556 err = dbuf_read_impl(db, zio, flags);
34dc7c2f
BB
1557
1558 /* dbuf_read_impl has dropped db_mtx for us */
1559
5f6d0b6f 1560 if (!err && prefetch)
755065f3 1561 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
34dc7c2f
BB
1562
1563 if ((flags & DB_RF_HAVESTRUCT) == 0)
572e2857
BB
1564 rw_exit(&dn->dn_struct_rwlock);
1565 DB_DNODE_EXIT(db);
5e021f56 1566 DBUF_STAT_BUMP(hash_misses);
34dc7c2f 1567
5e7f3ace
TC
1568 /*
1569 * If we created a zio_root we must execute it to avoid
1570 * leaking it, even if it isn't attached to any work due
1571 * to an error in dbuf_read_impl().
1572 */
1573 if (need_wait) {
1574 if (err == 0)
1575 err = zio_wait(zio);
1576 else
1577 VERIFY0(zio_wait(zio));
1578 }
34dc7c2f 1579 } else {
e49f1e20
WA
1580 /*
1581 * Another reader came in while the dbuf was in flight
1582 * between UNCACHED and CACHED. Either a writer will finish
1583 * writing the buffer (sending the dbuf to CACHED) or the
1584 * first reader's request will reach the read_done callback
1585 * and send the dbuf to CACHED. Otherwise, a failure
1586 * occurred and the dbuf went to UNCACHED.
1587 */
34dc7c2f
BB
1588 mutex_exit(&db->db_mtx);
1589 if (prefetch)
755065f3 1590 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
34dc7c2f 1591 if ((flags & DB_RF_HAVESTRUCT) == 0)
572e2857
BB
1592 rw_exit(&dn->dn_struct_rwlock);
1593 DB_DNODE_EXIT(db);
5e021f56 1594 DBUF_STAT_BUMP(hash_misses);
34dc7c2f 1595
e49f1e20 1596 /* Skip the wait per the caller's request. */
34dc7c2f
BB
1597 mutex_enter(&db->db_mtx);
1598 if ((flags & DB_RF_NEVERWAIT) == 0) {
1599 while (db->db_state == DB_READ ||
1600 db->db_state == DB_FILL) {
1601 ASSERT(db->db_state == DB_READ ||
1602 (flags & DB_RF_HAVESTRUCT) == 0);
1603 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1604 db, zio_t *, zio);
1605 cv_wait(&db->db_changed, &db->db_mtx);
1606 }
1607 if (db->db_state == DB_UNCACHED)
2e528b49 1608 err = SET_ERROR(EIO);
1609 }
1610 mutex_exit(&db->db_mtx);
1611 }
1612
1613 return (err);
1614}
1615
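/*
 * Editorial example, not part of the original source: the caller-side
 * pattern behind the three dbuf_read() branches above. Passing a NULL zio
 * makes dbuf_read() build and wait on its own zio_root(), as in the
 * DB_UNCACHED branch; a caller batching many reads supplies one parent zio
 * and calls zio_wait() once. The helper name is hypothetical.
 */
static int
dbuf_read_batch_example(dmu_buf_impl_t **dbs, int ndbs, spa_t *spa)
{
        /* One parent zio collects every child read issued by dbuf_read(). */
        zio_t *zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        for (int i = 0; i < ndbs; i++)
                (void) dbuf_read(dbs[i], zio, DB_RF_CANFAIL);

        /* Waits for all child reads; returns the first error, if any. */
        return (zio_wait(zio));
}
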
1616static void
1617dbuf_noread(dmu_buf_impl_t *db)
1618{
424fd7c3 1619 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
428870ff 1620 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1621 mutex_enter(&db->db_mtx);
1622 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1623 cv_wait(&db->db_changed, &db->db_mtx);
1624 if (db->db_state == DB_UNCACHED) {
1625 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
b0bc7a84 1626 spa_t *spa = db->db_objset->os_spa;
1627
1628 ASSERT(db->db_buf == NULL);
1629 ASSERT(db->db.db_data == NULL);
2aa34383 1630 dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
34dc7c2f 1631 db->db_state = DB_FILL;
b128c09f 1632 } else if (db->db_state == DB_NOFILL) {
0c66c32d 1633 dbuf_clear_data(db);
1634 } else {
1635 ASSERT3U(db->db_state, ==, DB_CACHED);
1636 }
1637 mutex_exit(&db->db_mtx);
1638}
1639
1640void
1641dbuf_unoverride(dbuf_dirty_record_t *dr)
1642{
1643 dmu_buf_impl_t *db = dr->dr_dbuf;
428870ff 1644 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1645 uint64_t txg = dr->dr_txg;
1646
1647 ASSERT(MUTEX_HELD(&db->db_mtx));
1648 /*
1649 * This assert is valid because dmu_sync() expects to be called by
1650 * a zilog's get_data while holding a range lock. This call only
1651 * comes from dbuf_dirty() callers who must also hold a range lock.
1652 */
1653 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1654 ASSERT(db->db_level == 0);
1655
428870ff 1656 if (db->db_blkid == DMU_BONUS_BLKID ||
1657 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1658 return;
1659
1660 ASSERT(db->db_data_pending != dr);
1661
34dc7c2f 1662 /* free this block */
1663 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1664 zio_free(db->db_objset->os_spa, txg, bp);
428870ff 1665
34dc7c2f 1666 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
03c6040b 1667 dr->dt.dl.dr_nopwrite = B_FALSE;
0c03d21a 1668 dr->dt.dl.dr_has_raw_params = B_FALSE;
03c6040b 1669
1670 /*
1671 * Release the already-written buffer, so we leave it in
1672 * a consistent dirty state. Note that all callers are
1673 * modifying the buffer, so they will immediately do
1674 * another (redundant) arc_release(). Therefore, leave
1675 * the buf thawed to save the effort of freezing &
1676 * immediately re-thawing it.
1677 */
1678 arc_release(dr->dt.dl.dr_data, db);
1679}
1680
1681/*
1682 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1683 * data blocks in the free range, so that any future readers will find
b0bc7a84 1684 * empty blocks.
b128c09f 1685 */
34dc7c2f 1686void
1687dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1688 dmu_tx_t *tx)
34dc7c2f 1689{
1690 dmu_buf_impl_t *db_search;
1691 dmu_buf_impl_t *db, *db_next;
34dc7c2f 1692 uint64_t txg = tx->tx_txg;
8951cb8d 1693 avl_index_t where;
8951cb8d 1694
1695 if (end_blkid > dn->dn_maxblkid &&
1696 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1697 end_blkid = dn->dn_maxblkid;
1698 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
34dc7c2f 1699
0c66c32d 1700 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1701 db_search->db_level = 0;
1702 db_search->db_blkid = start_blkid;
9925c28c 1703 db_search->db_state = DB_SEARCH;
ea97f8ce 1704
b663a23d 1705 mutex_enter(&dn->dn_dbufs_mtx);
1706 db = avl_find(&dn->dn_dbufs, db_search, &where);
1707 ASSERT3P(db, ==, NULL);
9c9531cb 1708
1709 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1710
1711 for (; db != NULL; db = db_next) {
1712 db_next = AVL_NEXT(&dn->dn_dbufs, db);
428870ff 1713 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
b128c09f 1714
1715 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1716 break;
1717 }
1718 ASSERT3U(db->db_blkid, >=, start_blkid);
1719
1720 /* found a level 0 buffer in the range */
1721 mutex_enter(&db->db_mtx);
1722 if (dbuf_undirty(db, tx)) {
1723 /* mutex has been dropped and dbuf destroyed */
34dc7c2f 1724 continue;
13fe0198 1725 }
34dc7c2f 1726
34dc7c2f 1727 if (db->db_state == DB_UNCACHED ||
b128c09f 1728 db->db_state == DB_NOFILL ||
1729 db->db_state == DB_EVICTING) {
1730 ASSERT(db->db.db_data == NULL);
1731 mutex_exit(&db->db_mtx);
1732 continue;
1733 }
1734 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1735 /* will be handled in dbuf_read_done or dbuf_rele */
1736 db->db_freed_in_flight = TRUE;
1737 mutex_exit(&db->db_mtx);
1738 continue;
1739 }
424fd7c3 1740 if (zfs_refcount_count(&db->db_holds) == 0) {
34dc7c2f 1741 ASSERT(db->db_buf);
d3c2ae1c 1742 dbuf_destroy(db);
1743 continue;
1744 }
1745 /* The dbuf is referenced */
1746
1747 if (db->db_last_dirty != NULL) {
1748 dbuf_dirty_record_t *dr = db->db_last_dirty;
1749
1750 if (dr->dr_txg == txg) {
1751 /*
1752 * This buffer is "in-use", re-adjust the file
1753 * size to reflect that this buffer may
1754 * contain new data when we sync.
1755 */
1756 if (db->db_blkid != DMU_SPILL_BLKID &&
1757 db->db_blkid > dn->dn_maxblkid)
1758 dn->dn_maxblkid = db->db_blkid;
1759 dbuf_unoverride(dr);
1760 } else {
1761 /*
1762 * This dbuf is not dirty in the open context.
1763 * Either uncache it (if it's not referenced in
1764 * the open context) or reset its contents to
1765 * empty.
1766 */
1767 dbuf_fix_old_data(db, txg);
1768 }
1769 }
1770 /* clear the contents if it's cached */
1771 if (db->db_state == DB_CACHED) {
1772 ASSERT(db->db.db_data != NULL);
1773 arc_release(db->db_buf, db);
1774 bzero(db->db.db_data, db->db.db_size);
1775 arc_buf_freeze(db->db_buf);
1776 }
1777
1778 mutex_exit(&db->db_mtx);
1779 }
8951cb8d 1780
8951cb8d 1781 kmem_free(db_search, sizeof (dmu_buf_impl_t));
1782 mutex_exit(&dn->dn_dbufs_mtx);
1783}
1784
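/*
 * Editorial sketch, not part of the original source: the AVL positioning
 * idiom dbuf_free_range() uses above. A dummy search key with
 * db_state == DB_SEARCH finds the insertion point, and
 * avl_nearest(..., AVL_AFTER) returns the first real dbuf at or after
 * start_blkid. The helper name is hypothetical; dn_dbufs_mtx must be held.
 */
static dmu_buf_impl_t *
dbuf_first_in_range_example(dnode_t *dn, uint64_t start_blkid)
{
        dmu_buf_impl_t *db_search, *db;
        avl_index_t where;

        ASSERT(MUTEX_HELD(&dn->dn_dbufs_mtx));

        db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
        db_search->db_level = 0;
        db_search->db_blkid = start_blkid;
        db_search->db_state = DB_SEARCH;        /* never matches a real dbuf */

        db = avl_find(&dn->dn_dbufs, db_search, &where);
        ASSERT3P(db, ==, NULL);                 /* the key itself is absent */
        db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

        kmem_free(db_search, sizeof (dmu_buf_impl_t));
        return (db);
}
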
1785void
1786dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1787{
1788 arc_buf_t *buf, *obuf;
1789 int osize = db->db.db_size;
1790 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
572e2857 1791 dnode_t *dn;
34dc7c2f 1792
428870ff 1793 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
34dc7c2f 1794
1795 DB_DNODE_ENTER(db);
1796 dn = DB_DNODE(db);
1797
34dc7c2f 1798 /* XXX does *this* func really need the lock? */
572e2857 1799 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1800
1801 /*
b0bc7a84 1802 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
1803 * is OK, because there can be no other references to the db
1804 * when we are changing its size, so no concurrent DB_FILL can
1805 * be happening.
1806 */
1807 /*
1808 * XXX we should be doing a dbuf_read, checking the return
1809 * value and returning that up to our callers
1810 */
b0bc7a84 1811 dmu_buf_will_dirty(&db->db, tx);
1812
1813 /* create the data buffer for the new block */
2aa34383 1814 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
1815
1816 /* copy old block data to the new block */
1817 obuf = db->db_buf;
1818 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1819 /* zero the remainder */
1820 if (size > osize)
1821 bzero((uint8_t *)buf->b_data + osize, size - osize);
1822
1823 mutex_enter(&db->db_mtx);
1824 dbuf_set_data(db, buf);
d3c2ae1c 1825 arc_buf_destroy(obuf, db);
1826 db->db.db_size = size;
1827
1828 if (db->db_level == 0) {
1829 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1830 db->db_last_dirty->dt.dl.dr_data = buf;
1831 }
1832 mutex_exit(&db->db_mtx);
1833
3ec3bc21 1834 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
572e2857 1835 DB_DNODE_EXIT(db);
1836}
1837
1838void
1839dbuf_release_bp(dmu_buf_impl_t *db)
1840{
b0bc7a84 1841 ASSERTV(objset_t *os = db->db_objset);
1842
1843 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1844 ASSERT(arc_released(os->os_phys_buf) ||
1845 list_link_active(&os->os_dsl_dataset->ds_synced_link));
1846 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1847
294f6806 1848 (void) arc_release(db->db_buf, db);
1849}
1850
1851/*
1852 * We already have a dirty record for this TXG, and we are being
1853 * dirtied again.
1854 */
1855static void
1856dbuf_redirty(dbuf_dirty_record_t *dr)
1857{
1858 dmu_buf_impl_t *db = dr->dr_dbuf;
1859
1860 ASSERT(MUTEX_HELD(&db->db_mtx));
1861
1862 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1863 /*
1864 * If this buffer has already been written out,
1865 * we now need to reset its state.
1866 */
1867 dbuf_unoverride(dr);
1868 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1869 db->db_state != DB_NOFILL) {
1870 /* Already released on initial dirty, so just thaw. */
1871 ASSERT(arc_released(db->db_buf));
1872 arc_buf_thaw(db->db_buf);
1873 }
1874 }
1875}
1876
1877dbuf_dirty_record_t *
1878dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1879{
1880 dnode_t *dn;
1881 objset_t *os;
1882 dbuf_dirty_record_t **drp, *dr;
1883 int drop_struct_lock = FALSE;
1884 int txgoff = tx->tx_txg & TXG_MASK;
1885
1886 ASSERT(tx->tx_txg != 0);
424fd7c3 1887 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1888 DMU_TX_DIRTY_BUF(tx, db);
1889
1890 DB_DNODE_ENTER(db);
1891 dn = DB_DNODE(db);
1892 /*
1893 * Shouldn't dirty a regular buffer in syncing context. Private
1894 * objects may be dirtied in syncing context, but only if they
1895 * were already pre-dirtied in open context.
34dc7c2f 1896 */
1897#ifdef DEBUG
1898 if (dn->dn_objset->os_dsl_dataset != NULL) {
1899 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
1900 RW_READER, FTAG);
1901 }
1902 ASSERT(!dmu_tx_is_syncing(tx) ||
1903 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1904 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1905 dn->dn_objset->os_dsl_dataset == NULL);
1906 if (dn->dn_objset->os_dsl_dataset != NULL)
1907 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
1908#endif
1909 /*
1910 * We make this assert for private objects as well, but after we
1911 * check if we're already dirty. They are allowed to re-dirty
1912 * in syncing context.
1913 */
1914 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1915 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1916 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1917
1918 mutex_enter(&db->db_mtx);
1919 /*
1920 * XXX make this true for indirects too? The problem is that
1921 * transactions created with dmu_tx_create_assigned() from
1922 * syncing context don't bother holding ahead.
1923 */
1924 ASSERT(db->db_level != 0 ||
1925 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1926 db->db_state == DB_NOFILL);
1927
1928 mutex_enter(&dn->dn_mtx);
1929 /*
1930 * Don't set dirtyctx to SYNC if we're just modifying this as we
1931 * initialize the objset.
1932 */
1933 if (dn->dn_dirtyctx == DN_UNDIRTIED) {
1934 if (dn->dn_objset->os_dsl_dataset != NULL) {
1935 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
1936 RW_READER, FTAG);
1937 }
1938 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1939 dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
1940 DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1941 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1942 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1943 }
1944 if (dn->dn_objset->os_dsl_dataset != NULL) {
1945 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
1946 FTAG);
1947 }
34dc7c2f 1948 }
1949
1950 if (tx->tx_txg > dn->dn_dirty_txg)
1951 dn->dn_dirty_txg = tx->tx_txg;
1952 mutex_exit(&dn->dn_mtx);
1953
1954 if (db->db_blkid == DMU_SPILL_BLKID)
1955 dn->dn_have_spill = B_TRUE;
1956
1957 /*
1958 * If this buffer is already dirty, we're done.
1959 */
1960 drp = &db->db_last_dirty;
1961 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1962 db->db.db_object == DMU_META_DNODE_OBJECT);
1963 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1964 drp = &dr->dr_next;
1965 if (dr && dr->dr_txg == tx->tx_txg) {
1966 DB_DNODE_EXIT(db);
1967
5a28a973 1968 dbuf_redirty(dr);
1969 mutex_exit(&db->db_mtx);
1970 return (dr);
1971 }
1972
1973 /*
1974 * Only valid if not already dirty.
1975 */
1976 ASSERT(dn->dn_object == 0 ||
1977 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1978 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1979
1980 ASSERT3U(dn->dn_nlevels, >, db->db_level);
1981
1982 /*
1983 * We should only be dirtying in syncing context if it's the
1984 * mos or we're initializing the os or it's a special object.
1985 * However, we are allowed to dirty in syncing context provided
1986 * we already dirtied it in open context. Hence we must make
1987 * this assertion only if we're not already dirty.
34dc7c2f 1988 */
572e2857 1989 os = dn->dn_objset;
3b7f360c 1990 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
1991#ifdef DEBUG
1992 if (dn->dn_objset->os_dsl_dataset != NULL)
1993 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
1994 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1995 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1996 if (dn->dn_objset->os_dsl_dataset != NULL)
1997 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
1998#endif
1999 ASSERT(db->db.db_size != 0);
2000
2001 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2002
428870ff 2003 if (db->db_blkid != DMU_BONUS_BLKID) {
3ec3bc21 2004 dmu_objset_willuse_space(os, db->db.db_size, tx);
2005 }
2006
2007 /*
2008 * If this buffer is dirty in an old transaction group we need
2009 * to make a copy of it so that the changes we make in this
2010 * transaction group won't leak out when we sync the older txg.
2011 */
79c76d5b 2012 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
98f72a53 2013 list_link_init(&dr->dr_dirty_node);
2014 if (db->db_level == 0) {
2015 void *data_old = db->db_buf;
2016
b128c09f 2017 if (db->db_state != DB_NOFILL) {
428870ff 2018 if (db->db_blkid == DMU_BONUS_BLKID) {
2019 dbuf_fix_old_data(db, tx->tx_txg);
2020 data_old = db->db.db_data;
2021 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2022 /*
2023 * Release the data buffer from the cache so
2024 * that we can modify it without impacting
2025 * possible other users of this cached data
2026 * block. Note that indirect blocks and
2027 * private objects are not released until the
2028 * syncing state (since they are only modified
2029 * then).
2030 */
2031 arc_release(db->db_buf, db);
2032 dbuf_fix_old_data(db, tx->tx_txg);
2033 data_old = db->db_buf;
2034 }
2035 ASSERT(data_old != NULL);
34dc7c2f 2036 }
2037 dr->dt.dl.dr_data = data_old;
2038 } else {
448d7aaa 2039 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2040 list_create(&dr->dt.di.dr_children,
2041 sizeof (dbuf_dirty_record_t),
2042 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2043 }
2044 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
2045 dr->dr_accounted = db->db.db_size;
2046 dr->dr_dbuf = db;
2047 dr->dr_txg = tx->tx_txg;
2048 dr->dr_next = *drp;
2049 *drp = dr;
2050
2051 /*
2052 * We could have been freed_in_flight between the dbuf_noread
2053 * and dbuf_dirty. We win, as though the dbuf_noread() had
2054 * happened after the free.
2055 */
2056 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2057 db->db_blkid != DMU_SPILL_BLKID) {
34dc7c2f 2058 mutex_enter(&dn->dn_mtx);
2059 if (dn->dn_free_ranges[txgoff] != NULL) {
2060 range_tree_clear(dn->dn_free_ranges[txgoff],
2061 db->db_blkid, 1);
2062 }
2063 mutex_exit(&dn->dn_mtx);
2064 db->db_freed_in_flight = FALSE;
2065 }
2066
2067 /*
2068 * This buffer is now part of this txg
2069 */
2070 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2071 db->db_dirtycnt += 1;
2072 ASSERT3U(db->db_dirtycnt, <=, 3);
2073
2074 mutex_exit(&db->db_mtx);
2075
2076 if (db->db_blkid == DMU_BONUS_BLKID ||
2077 db->db_blkid == DMU_SPILL_BLKID) {
2078 mutex_enter(&dn->dn_mtx);
2079 ASSERT(!list_link_active(&dr->dr_dirty_node));
2080 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2081 mutex_exit(&dn->dn_mtx);
2082 dnode_setdirty(dn, tx);
572e2857 2083 DB_DNODE_EXIT(db);
34dc7c2f 2084 return (dr);
2085 }
2086
2087 /*
2088 * The dn_struct_rwlock prevents db_blkptr from changing
2089 * due to a write from syncing context completing
2090 * while we are running, so we want to acquire it before
2091 * looking at db_blkptr.
2092 */
2093 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2094 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2095 drop_struct_lock = TRUE;
2096 }
2097
2098 /*
2099 * We need to hold the dn_struct_rwlock to make this assertion,
2100 * because it protects dn_phys / dn_next_nlevels from changing.
2101 */
2102 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2103 dn->dn_phys->dn_nlevels > db->db_level ||
2104 dn->dn_next_nlevels[txgoff] > db->db_level ||
2105 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2106 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2107
2108 /*
2109 * If we are overwriting a dedup BP, then unless it is snapshotted,
2110 * when we get to syncing context we will need to decrement its
2111 * refcount in the DDT. Prefetch the relevant DDT block so that
2112 * syncing context won't have to wait for the i/o.
2113 */
2114 ddt_prefetch(os->os_spa, db->db_blkptr);
34dc7c2f 2115
b128c09f 2116 if (db->db_level == 0) {
2117 ASSERT(!db->db_objset->os_raw_receive ||
2118 dn->dn_maxblkid >= db->db_blkid);
2119 dnode_new_blkid(dn, db->db_blkid, tx,
2120 drop_struct_lock, B_FALSE);
2121 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2122 }
2123
2124 if (db->db_level+1 < dn->dn_nlevels) {
2125 dmu_buf_impl_t *parent = db->db_parent;
2126 dbuf_dirty_record_t *di;
2127 int parent_held = FALSE;
2128
2129 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2130 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2131
2132 parent = dbuf_hold_level(dn, db->db_level+1,
2133 db->db_blkid >> epbs, FTAG);
428870ff 2134 ASSERT(parent != NULL);
2135 parent_held = TRUE;
2136 }
2137 if (drop_struct_lock)
2138 rw_exit(&dn->dn_struct_rwlock);
2139 ASSERT3U(db->db_level+1, ==, parent->db_level);
2140 di = dbuf_dirty(parent, tx);
2141 if (parent_held)
2142 dbuf_rele(parent, FTAG);
2143
2144 mutex_enter(&db->db_mtx);
2145 /*
2146 * Since we've dropped the mutex, it's possible that
2147 * dbuf_undirty() might have changed this out from under us.
2148 */
2149 if (db->db_last_dirty == dr ||
2150 dn->dn_object == DMU_META_DNODE_OBJECT) {
2151 mutex_enter(&di->dt.di.dr_mtx);
2152 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2153 ASSERT(!list_link_active(&dr->dr_dirty_node));
2154 list_insert_tail(&di->dt.di.dr_children, dr);
2155 mutex_exit(&di->dt.di.dr_mtx);
2156 dr->dr_parent = di;
2157 }
2158 mutex_exit(&db->db_mtx);
2159 } else {
2160 ASSERT(db->db_level+1 == dn->dn_nlevels);
2161 ASSERT(db->db_blkid < dn->dn_nblkptr);
572e2857 2162 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2163 mutex_enter(&dn->dn_mtx);
2164 ASSERT(!list_link_active(&dr->dr_dirty_node));
2165 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2166 mutex_exit(&dn->dn_mtx);
2167 if (drop_struct_lock)
2168 rw_exit(&dn->dn_struct_rwlock);
2169 }
2170
2171 dnode_setdirty(dn, tx);
572e2857 2172 DB_DNODE_EXIT(db);
2173 return (dr);
2174}
2175
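/*
 * Editorial sketch, not part of the original source: dirty records hang off
 * db_last_dirty in descending txg order, so a lookup walks dr_next until
 * dr_txg is no longer newer than the target txg, as dbuf_dirty() does above
 * when deciding between a redirty and a new record. The helper name is
 * hypothetical; db_mtx must be held.
 */
static dbuf_dirty_record_t *
dbuf_find_dirty_example(dmu_buf_impl_t *db, uint64_t txg)
{
        dbuf_dirty_record_t *dr;

        ASSERT(MUTEX_HELD(&db->db_mtx));
        for (dr = db->db_last_dirty; dr != NULL && dr->dr_txg > txg;
            dr = dr->dr_next)
                continue;
        return ((dr != NULL && dr->dr_txg == txg) ? dr : NULL);
}
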
13fe0198 2176/*
2177 * Undirty a buffer in the transaction group referenced by the given
2178 * transaction. Return whether this evicted the dbuf.
2179 */
2180static boolean_t
2181dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2182{
572e2857 2183 dnode_t *dn;
2184 uint64_t txg = tx->tx_txg;
2185 dbuf_dirty_record_t *dr, **drp;
2186
2187 ASSERT(txg != 0);
2188
2189 /*
2190 * Due to our use of dn_nlevels below, this can only be called
2191 * in open context, unless we are operating on the MOS.
2192 * From syncing context, dn_nlevels may be different from the
2193 * dn_nlevels used when dbuf was dirtied.
2194 */
2195 ASSERT(db->db_objset ==
2196 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2197 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
428870ff 2198 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2199 ASSERT0(db->db_level);
2200 ASSERT(MUTEX_HELD(&db->db_mtx));
34dc7c2f 2201
2202 /*
2203 * If this buffer is not dirty, we're done.
2204 */
2205 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
2206 if (dr->dr_txg <= txg)
2207 break;
2208 if (dr == NULL || dr->dr_txg < txg)
2209 return (B_FALSE);
34dc7c2f 2210 ASSERT(dr->dr_txg == txg);
428870ff 2211 ASSERT(dr->dr_dbuf == db);
34dc7c2f 2212
2213 DB_DNODE_ENTER(db);
2214 dn = DB_DNODE(db);
2215
2216 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2217
2218 ASSERT(db->db.db_size != 0);
2219
2220 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2221 dr->dr_accounted, txg);
2222
2223 *drp = dr->dr_next;
2224
2225 /*
2226 * Note that there are three places in dbuf_dirty()
2227 * where this dirty record may be put on a list.
2228 * Make sure to do a list_remove corresponding to
2229 * every one of those list_insert calls.
2230 */
2231 if (dr->dr_parent) {
2232 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2233 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2234 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
ef3c1dea 2235 } else if (db->db_blkid == DMU_SPILL_BLKID ||
4bda3bd0 2236 db->db_level + 1 == dn->dn_nlevels) {
b128c09f 2237 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2238 mutex_enter(&dn->dn_mtx);
2239 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2240 mutex_exit(&dn->dn_mtx);
2241 }
572e2857 2242 DB_DNODE_EXIT(db);
34dc7c2f 2243
2244 if (db->db_state != DB_NOFILL) {
2245 dbuf_unoverride(dr);
34dc7c2f 2246
34dc7c2f 2247 ASSERT(db->db_buf != NULL);
2248 ASSERT(dr->dt.dl.dr_data != NULL);
2249 if (dr->dt.dl.dr_data != db->db_buf)
d3c2ae1c 2250 arc_buf_destroy(dr->dt.dl.dr_data, db);
34dc7c2f 2251 }
58c4aa00 2252
2253 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2254
2255 ASSERT(db->db_dirtycnt > 0);
2256 db->db_dirtycnt -= 1;
2257
424fd7c3 2258 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2259 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2260 dbuf_destroy(db);
13fe0198 2261 return (B_TRUE);
2262 }
2263
13fe0198 2264 return (B_FALSE);
2265}
2266
2267static void
2268dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
34dc7c2f 2269{
b0bc7a84 2270 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2271
2272 ASSERT(tx->tx_txg != 0);
424fd7c3 2273 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
34dc7c2f 2274
2275 /*
2276 * Quick check for dirtiness. For already dirty blocks, this
2277 * reduces runtime of this function by >90%, and overall performance
2278 * by 50% for some workloads (e.g. file deletion with indirect blocks
2279 * cached).
2280 */
2281 mutex_enter(&db->db_mtx);
2282
1c27024e 2283 dbuf_dirty_record_t *dr;
2284 for (dr = db->db_last_dirty;
2285 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
2286 /*
2287 * It's possible that it is already dirty but not cached,
2288 * because there are some calls to dbuf_dirty() that don't
2289 * go through dmu_buf_will_dirty().
2290 */
2291 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
2292 /* This dbuf is already dirty and cached. */
2293 dbuf_redirty(dr);
2294 mutex_exit(&db->db_mtx);
2295 return;
2296 }
2297 }
2298 mutex_exit(&db->db_mtx);
2299
2300 DB_DNODE_ENTER(db);
2301 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
b5256303 2302 flags |= DB_RF_HAVESTRUCT;
572e2857 2303 DB_DNODE_EXIT(db);
b5256303 2304 (void) dbuf_read(db, NULL, flags);
2305 (void) dbuf_dirty(db, tx);
2306}
2307
2308void
2309dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2310{
2311 dmu_buf_will_dirty_impl(db_fake,
2312 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2313}
2314
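/*
 * Editorial example, not part of the original source: the open-context
 * transaction sequence that ends in the dmu_buf_will_dirty() call above.
 * The caller reserves space, assigns the tx to a txg, announces the intent
 * to modify, writes through db_data, and commits. The helper name and its
 * zero-fill payload are hypothetical.
 */
static int
dmu_buf_modify_example(objset_t *os, uint64_t object, dmu_buf_t *db)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_write(tx, object, db->db_offset, db->db_size);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }

        dmu_buf_will_dirty(db, tx);             /* read + dirty, as above */
        bzero(db->db_data, db->db_size);        /* modify the buffer */

        dmu_tx_commit(tx);
        return (0);
}
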
2315boolean_t
2316dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2317{
2318 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2319
2320 mutex_enter(&db->db_mtx);
2321 for (dbuf_dirty_record_t *dr = db->db_last_dirty;
2322 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
2323 if (dr->dr_txg == tx->tx_txg) {
2324 mutex_exit(&db->db_mtx);
2325 return (B_TRUE);
2326 }
2327 }
2328 mutex_exit(&db->db_mtx);
2329 return (B_FALSE);
2330}
2331
2332void
2333dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2334{
2335 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2336
2337 db->db_state = DB_NOFILL;
2338
2339 dmu_buf_will_fill(db_fake, tx);
2340}
2341
2342void
2343dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2344{
2345 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2346
428870ff 2347 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2348 ASSERT(tx->tx_txg != 0);
2349 ASSERT(db->db_level == 0);
424fd7c3 2350 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2351
2352 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2353 dmu_tx_private_ok(tx));
2354
2355 dbuf_noread(db);
2356 (void) dbuf_dirty(db, tx);
2357}
2358
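/*
 * Editorial example, not part of the original source: the
 * will_fill/fill_done protocol. When an entire block is being overwritten
 * there is no reason to read the old contents first, so dbuf_noread() above
 * puts the dbuf in DB_FILL and dmu_buf_fill_done() (defined below) moves it
 * to DB_CACHED. This mirrors how dmu_write() consumes the interface; the
 * helper name is hypothetical.
 */
static void
dmu_buf_overwrite_example(dmu_buf_t *db, const void *buf, dmu_tx_t *tx)
{
        dmu_buf_will_fill(db, tx);              /* no read-modify-write */
        bcopy(buf, db->db_data, db->db_size);
        dmu_buf_fill_done(db, tx);              /* DB_FILL -> DB_CACHED */
}
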
2359/*
2360 * This function is effectively the same as dmu_buf_will_dirty(), but
2361 * indicates the caller expects raw encrypted data in the db, and provides
2362 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2363 * blkptr_t when this dbuf is written. This is only used for blocks of
2364 * dnodes, during raw receive.
2365 */
2366void
2367dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2368 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2369{
2370 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2371 dbuf_dirty_record_t *dr;
2372
2373 /*
2374 * dr_has_raw_params is only processed for blocks of dnodes
2375 * (see dbuf_sync_dnode_leaf_crypt()).
2376 */
2377 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2378 ASSERT3U(db->db_level, ==, 0);
2379 ASSERT(db->db_objset->os_raw_receive);
2380
2381 dmu_buf_will_dirty_impl(db_fake,
2382 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2383
2384 dr = db->db_last_dirty;
2385 while (dr != NULL && dr->dr_txg > tx->tx_txg)
2386 dr = dr->dr_next;
2387
2388 ASSERT3P(dr, !=, NULL);
2389 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2390
2391 dr->dt.dl.dr_has_raw_params = B_TRUE;
2392 dr->dt.dl.dr_byteorder = byteorder;
2393 bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2394 bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2395 bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2396}
2397
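/*
 * Editorial example, not part of the original source: a raw-receive caller
 * would pass the byteorder, salt, IV and MAC carried in the send stream
 * record for a meta-dnode block, sized by the ZIO_DATA_*_LEN constants used
 * above. Everything here apart from dmu_buf_set_crypt_params() itself is
 * hypothetical, with zeroed stand-ins for the stream values.
 */
static void
raw_recv_crypt_params_example(dmu_buf_t *db, dmu_tx_t *tx)
{
        uint8_t salt[ZIO_DATA_SALT_LEN] = { 0 };
        uint8_t iv[ZIO_DATA_IV_LEN] = { 0 };
        uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };

        /* Dirties the dbuf without decrypting, then records the params. */
        dmu_buf_set_crypt_params(db, B_TRUE, salt, iv, mac, tx);
}
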
2398#pragma weak dmu_buf_fill_done = dbuf_fill_done
2399/* ARGSUSED */
2400void
2401dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
2402{
2403 mutex_enter(&db->db_mtx);
2404 DBUF_VERIFY(db);
2405
2406 if (db->db_state == DB_FILL) {
2407 if (db->db_level == 0 && db->db_freed_in_flight) {
428870ff 2408 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2409 /* we were freed while filling */
2410 /* XXX dbuf_undirty? */
2411 bzero(db->db.db_data, db->db.db_size);
2412 db->db_freed_in_flight = FALSE;
2413 }
2414 db->db_state = DB_CACHED;
2415 cv_broadcast(&db->db_changed);
2416 }
2417 mutex_exit(&db->db_mtx);
2418}
2419
2420void
2421dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2422 bp_embedded_type_t etype, enum zio_compress comp,
2423 int uncompressed_size, int compressed_size, int byteorder,
2424 dmu_tx_t *tx)
2425{
2426 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2427 struct dirty_leaf *dl;
2428 dmu_object_type_t type;
2429
2430 if (etype == BP_EMBEDDED_TYPE_DATA) {
2431 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2432 SPA_FEATURE_EMBEDDED_DATA));
2433 }
2434
2435 DB_DNODE_ENTER(db);
2436 type = DB_DNODE(db)->dn_type;
2437 DB_DNODE_EXIT(db);
2438
2439 ASSERT0(db->db_level);
2440 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2441
2442 dmu_buf_will_not_fill(dbuf, tx);
2443
2444 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
2445 dl = &db->db_last_dirty->dt.dl;
2446 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2447 data, comp, uncompressed_size, compressed_size);
2448 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2449 BP_SET_TYPE(&dl->dr_overridden_by, type);
2450 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2451 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2452
2453 dl->dr_override_state = DR_OVERRIDDEN;
2454 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
2455}
2456
2457/*
2458 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2459 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2460 */
2461void
2462dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2463{
424fd7c3 2464 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
428870ff 2465 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
9babb374 2466 ASSERT(db->db_level == 0);
2aa34383 2467 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
9babb374 2468 ASSERT(buf != NULL);
caf9dd20 2469 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2470 ASSERT(tx->tx_txg != 0);
2471
2472 arc_return_buf(buf, db);
2473 ASSERT(arc_released(buf));
2474
2475 mutex_enter(&db->db_mtx);
2476
2477 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2478 cv_wait(&db->db_changed, &db->db_mtx);
2479
2480 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2481
2482 if (db->db_state == DB_CACHED &&
424fd7c3 2483 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2484 /*
2485 * In practice, we will never have a case where we have an
2486 * encrypted arc buffer while additional holds exist on the
2487 * dbuf. We don't handle this here so we simply assert that
2488 * fact instead.
2489 */
2490 ASSERT(!arc_is_encrypted(buf));
2491 mutex_exit(&db->db_mtx);
2492 (void) dbuf_dirty(db, tx);
2493 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
d3c2ae1c 2494 arc_buf_destroy(buf, db);
428870ff 2495 xuio_stat_wbuf_copied();
2496 return;
2497 }
2498
428870ff 2499 xuio_stat_wbuf_nocopy();
2500 if (db->db_state == DB_CACHED) {
2501 dbuf_dirty_record_t *dr = db->db_last_dirty;
2502
2503 ASSERT(db->db_buf != NULL);
2504 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2505 ASSERT(dr->dt.dl.dr_data == db->db_buf);
440a3eb9 2506
2507 if (!arc_released(db->db_buf)) {
2508 ASSERT(dr->dt.dl.dr_override_state ==
2509 DR_OVERRIDDEN);
2510 arc_release(db->db_buf, db);
2511 }
2512 dr->dt.dl.dr_data = buf;
d3c2ae1c 2513 arc_buf_destroy(db->db_buf, db);
2514 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2515 arc_release(db->db_buf, db);
d3c2ae1c 2516 arc_buf_destroy(db->db_buf, db);
2517 }
2518 db->db_buf = NULL;
2519 }
2520 ASSERT(db->db_buf == NULL);
2521 dbuf_set_data(db, buf);
2522 db->db_state = DB_FILL;
2523 mutex_exit(&db->db_mtx);
2524 (void) dbuf_dirty(db, tx);
b0bc7a84 2525 dmu_buf_fill_done(&db->db, tx);
2526}
2527
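/*
 * Editorial example, not part of the original source: the zero-copy write
 * path that lands in dbuf_assign_arcbuf() above. A caller borrows an arc
 * buf sized for a full block, fills it, and donates it; this assumes the
 * dmu_request_arcbuf()/dmu_assign_arcbuf_by_dbuf() wrappers from dmu.h.
 * The helper name is hypothetical.
 */
static void
dmu_zero_copy_write_example(dmu_buf_t *handle, uint64_t offset,
    const void *data, int size, dmu_tx_t *tx)
{
        arc_buf_t *abuf = dmu_request_arcbuf(handle, size);

        bcopy(data, abuf->b_data, size);

        /* Donates abuf, or copies and frees it if the dbuf is busy. */
        (void) dmu_assign_arcbuf_by_dbuf(handle, offset, abuf, tx);
}
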
34dc7c2f 2528void
d3c2ae1c 2529dbuf_destroy(dmu_buf_impl_t *db)
34dc7c2f 2530{
572e2857 2531 dnode_t *dn;
34dc7c2f 2532 dmu_buf_impl_t *parent = db->db_parent;
572e2857 2533 dmu_buf_impl_t *dndb;
2534
2535 ASSERT(MUTEX_HELD(&db->db_mtx));
424fd7c3 2536 ASSERT(zfs_refcount_is_zero(&db->db_holds));
34dc7c2f 2537
2538 if (db->db_buf != NULL) {
2539 arc_buf_destroy(db->db_buf, db);
2540 db->db_buf = NULL;
2541 }
34dc7c2f 2542
2543 if (db->db_blkid == DMU_BONUS_BLKID) {
2544 int slots = DB_DNODE(db)->dn_num_slots;
2545 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2546 if (db->db.db_data != NULL) {
2547 kmem_free(db->db.db_data, bonuslen);
2548 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2549 db->db_state = DB_UNCACHED;
2550 }
2551 }
2552
2553 dbuf_clear_data(db);
2554
2555 if (multilist_link_active(&db->db_cache_link)) {
2556 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2557 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2558
2559 multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
424fd7c3 2560 (void) zfs_refcount_remove_many(
2e5dc449 2561 &dbuf_caches[db->db_caching_status].size,
d3c2ae1c 2562 db->db.db_size, db);
2563
2564 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2565 DBUF_STAT_BUMPDOWN(metadata_cache_count);
2566 } else {
2567 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2568 DBUF_STAT_BUMPDOWN(cache_count);
2569 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2570 db->db.db_size);
2571 }
2572 db->db_caching_status = DB_NO_CACHE;
2573 }
2574
b128c09f 2575 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2576 ASSERT(db->db_data_pending == NULL);
2577
2578 db->db_state = DB_EVICTING;
2579 db->db_blkptr = NULL;
2580
2581 /*
2582 * Now that db_state is DB_EVICTING, nobody else can find this via
2583 * the hash table. We can now drop db_mtx, which allows us to
2584 * acquire the dn_dbufs_mtx.
2585 */
2586 mutex_exit(&db->db_mtx);
2587
2588 DB_DNODE_ENTER(db);
2589 dn = DB_DNODE(db);
2590 dndb = dn->dn_dbuf;
2591 if (db->db_blkid != DMU_BONUS_BLKID) {
2592 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2593 if (needlock)
2594 mutex_enter(&dn->dn_dbufs_mtx);
8951cb8d 2595 avl_remove(&dn->dn_dbufs, db);
73ad4a9f 2596 atomic_dec_32(&dn->dn_dbufs_count);
2597 membar_producer();
2598 DB_DNODE_EXIT(db);
2599 if (needlock)
2600 mutex_exit(&dn->dn_dbufs_mtx);
2601 /*
2602 * Decrementing the dbuf count means that the hold corresponding
2603 * to the removed dbuf is no longer discounted in dnode_move(),
2604 * so the dnode cannot be moved until after we release the hold.
2605 * The membar_producer() ensures visibility of the decremented
2606 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2607 * release any lock.
2608 */
1fac63e5 2609 mutex_enter(&dn->dn_mtx);
3d503a76 2610 dnode_rele_and_unlock(dn, db, B_TRUE);
572e2857 2611 db->db_dnode_handle = NULL;
2612
2613 dbuf_hash_remove(db);
2614 } else {
2615 DB_DNODE_EXIT(db);
2616 }
2617
424fd7c3 2618 ASSERT(zfs_refcount_is_zero(&db->db_holds));
34dc7c2f 2619
2620 db->db_parent = NULL;
2621
2622 ASSERT(db->db_buf == NULL);
2623 ASSERT(db->db.db_data == NULL);
2624 ASSERT(db->db_hash_next == NULL);
2625 ASSERT(db->db_blkptr == NULL);
2626 ASSERT(db->db_data_pending == NULL);
2e5dc449 2627 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2628 ASSERT(!multilist_link_active(&db->db_cache_link));
2629
2630 kmem_cache_free(dbuf_kmem_cache, db);
2631 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2632
2633 /*
572e2857 2634 * If this dbuf is referenced from an indirect dbuf,
2635 * decrement the ref count on the indirect dbuf.
2636 */
2637 if (parent && parent != dndb) {
2638 mutex_enter(&parent->db_mtx);
3d503a76 2639 dbuf_rele_and_unlock(parent, db, B_TRUE);
1fac63e5 2640 }
2641}
2642
2643/*
2644 * Note: While bpp will always be updated if the function returns success,
2645 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
9c5167d1 2646 * this happens when the dnode is the meta-dnode, or {user|group|project}used
2647 * object.
2648 */
2649__attribute__((always_inline))
2650static inline int
34dc7c2f 2651dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
adb726eb 2652 dmu_buf_impl_t **parentp, blkptr_t **bpp)
34dc7c2f 2653{
2654 *parentp = NULL;
2655 *bpp = NULL;
2656
2657 ASSERT(blkid != DMU_BONUS_BLKID);
2658
2659 if (blkid == DMU_SPILL_BLKID) {
2660 mutex_enter(&dn->dn_mtx);
2661 if (dn->dn_have_spill &&
2662 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
50c957f7 2663 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2664 else
2665 *bpp = NULL;
2666 dbuf_add_ref(dn->dn_dbuf, NULL);
2667 *parentp = dn->dn_dbuf;
2668 mutex_exit(&dn->dn_mtx);
2669 return (0);
2670 }
34dc7c2f 2671
1c27024e 2672 int nlevels =
32d41fb7 2673 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
1c27024e 2674 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2675
2676 ASSERT3U(level * epbs, <, 64);
2677 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2678 /*
2679 * This assertion shouldn't trip as long as the max indirect block size
2680 * is less than 1M. The reason for this is that up to that point,
2681 * the number of levels required to address an entire object with blocks
2682 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
2683 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
2684 * (i.e. we can address the entire object), objects will all use at most
2685 * N-1 levels and the assertion won't overflow. However, once epbs is
2686 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
2687 * enough to address an entire object, so objects will have 5 levels,
2688 * but then this assertion will overflow.
2689 *
2690 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2691 * need to redo this logic to handle overflows.
2692 */
2693 ASSERT(level >= nlevels ||
2694 ((nlevels - level - 1) * epbs) +
2695 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
34dc7c2f 2696 if (level >= nlevels ||
2697 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2698 ((nlevels - level - 1) * epbs)) ||
2699 (fail_sparse &&
2700 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
34dc7c2f 2701 /* the buffer has no parent yet */
2e528b49 2702 return (SET_ERROR(ENOENT));
2703 } else if (level < nlevels-1) {
2704 /* this block is referenced from an indirect block */
fc5bb51f 2705 int err;
2706 dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level + 1,
2707 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2708 err = dbuf_hold_impl_arg(dh);
2709 dbuf_hold_arg_destroy(dh);
2710 if (err)
2711 return (err);
2712 err = dbuf_read(*parentp, NULL,
2713 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2714 if (err) {
2715 dbuf_rele(*parentp, NULL);
2716 *parentp = NULL;
2717 return (err);
2718 }
2719 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2720 (blkid & ((1ULL << epbs) - 1));
2721 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2722 ASSERT(BP_IS_HOLE(*bpp));
2723 return (0);
2724 } else {
2725 /* the block is referenced from the dnode */
2726 ASSERT3U(level, ==, nlevels-1);
2727 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2728 blkid < dn->dn_phys->dn_nblkptr);
2729 if (dn->dn_dbuf) {
2730 dbuf_add_ref(dn->dn_dbuf, NULL);
2731 *parentp = dn->dn_dbuf;
2732 }
2733 *bpp = &dn->dn_phys->dn_blkptr[blkid];
2734 return (0);
2735 }
2736}
2737
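/*
 * Editorial worked example, not part of the original source, of the level
 * arithmetic dbuf_findbp() relies on above. With 128K indirect blocks,
 * epbs = dn_indblkshift - SPA_BLKPTRSHIFT = 17 - 7 = 10, so each indirect
 * block holds 1024 block pointers: the parent of (level, blkid) is
 * (level + 1, blkid >> 10), and our bp sits at index blkid & 1023 within
 * it. The helper name is hypothetical.
 */
static void
dbuf_parent_of_example(dnode_t *dn, int level, uint64_t blkid,
    int *parent_level, uint64_t *parent_blkid, uint64_t *bp_index)
{
        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

        *parent_level = level + 1;
        *parent_blkid = blkid >> epbs;
        *bp_index = blkid & ((1ULL << epbs) - 1);
}
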
2738static dmu_buf_impl_t *
2739dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2740 dmu_buf_impl_t *parent, blkptr_t *blkptr)
2741{
428870ff 2742 objset_t *os = dn->dn_objset;
2743 dmu_buf_impl_t *db, *odb;
2744
2745 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2746 ASSERT(dn->dn_type != DMU_OT_NONE);
2747
d3c2ae1c 2748 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
2749
2750 db->db_objset = os;
2751 db->db.db_object = dn->dn_object;
2752 db->db_level = level;
2753 db->db_blkid = blkid;
2754 db->db_last_dirty = NULL;
2755 db->db_dirtycnt = 0;
572e2857 2756 db->db_dnode_handle = dn->dn_handle;
2757 db->db_parent = parent;
2758 db->db_blkptr = blkptr;
2759
0c66c32d 2760 db->db_user = NULL;
2761 db->db_user_immediate_evict = FALSE;
2762 db->db_freed_in_flight = FALSE;
2763 db->db_pending_evict = FALSE;
34dc7c2f 2764
428870ff 2765 if (blkid == DMU_BONUS_BLKID) {
34dc7c2f 2766 ASSERT3P(parent, ==, dn->dn_dbuf);
50c957f7 2767 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
2768 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
2769 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
428870ff 2770 db->db.db_offset = DMU_BONUS_BLKID;
34dc7c2f 2771 db->db_state = DB_UNCACHED;
2e5dc449 2772 db->db_caching_status = DB_NO_CACHE;
34dc7c2f 2773 /* the bonus dbuf is not placed in the hash table */
25458cbe 2774 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
34dc7c2f 2775 return (db);
2776 } else if (blkid == DMU_SPILL_BLKID) {
2777 db->db.db_size = (blkptr != NULL) ?
2778 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
2779 db->db.db_offset = 0;
2780 } else {
2781 int blocksize =
e8b96c60 2782 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
2783 db->db.db_size = blocksize;
2784 db->db.db_offset = db->db_blkid * blocksize;
2785 }
2786
2787 /*
2788 * Hold the dn_dbufs_mtx while we get the new dbuf
2789 * in the hash table *and* added to the dbufs list.
2790 * This prevents a possible deadlock with someone
2791 * trying to look up this dbuf before its added to the
2792 * dn_dbufs list.
2793 */
2794 mutex_enter(&dn->dn_dbufs_mtx);
2795 db->db_state = DB_EVICTING;
2796 if ((odb = dbuf_hash_insert(db)) != NULL) {
2797 /* someone else inserted it first */
d3c2ae1c 2798 kmem_cache_free(dbuf_kmem_cache, db);
34dc7c2f 2799 mutex_exit(&dn->dn_dbufs_mtx);
5e021f56 2800 DBUF_STAT_BUMP(hash_insert_race);
2801 return (odb);
2802 }
8951cb8d 2803 avl_add(&dn->dn_dbufs, db);
9c9531cb 2804
34dc7c2f 2805 db->db_state = DB_UNCACHED;
2e5dc449 2806 db->db_caching_status = DB_NO_CACHE;
34dc7c2f 2807 mutex_exit(&dn->dn_dbufs_mtx);
25458cbe 2808 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2809
2810 if (parent && parent != dn->dn_dbuf)
2811 dbuf_add_ref(parent, db);
2812
2813 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
424fd7c3 2814 zfs_refcount_count(&dn->dn_holds) > 0);
c13060e4 2815 (void) zfs_refcount_add(&dn->dn_holds, db);
73ad4a9f 2816 atomic_inc_32(&dn->dn_dbufs_count);
2817
2818 dprintf_dbuf(db, "db=%p\n", db);
2819
2820 return (db);
2821}
2822
2823typedef struct dbuf_prefetch_arg {
2824 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
2825 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
2826 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
2827 int dpa_curlevel; /* The current level that we're reading */
d3c2ae1c 2828 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
2829 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
2830 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
2831 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
2832} dbuf_prefetch_arg_t;
2833
2834/*
2835 * Actually issue the prefetch read for the block given.
2836 */
2837static void
2838dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
2839{
2840 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2841 return;
2842
4515b1d0 2843 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2844 arc_flags_t aflags =
2845 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
fcff0f35 2846
2847 /* dnodes are always read as raw and then converted later */
2848 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
2849 dpa->dpa_curlevel == 0)
2850 zio_flags |= ZIO_FLAG_RAW;
2851
2852 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2853 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
2854 ASSERT(dpa->dpa_zio != NULL);
2855 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
4515b1d0 2856 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
2857}
2858
2859/*
2860 * Called when an indirect block above our prefetch target is read in. This
2861 * will either read in the next indirect block down the tree or issue the actual
2862 * prefetch if the next block down is our target.
2863 */
2864static void
2865dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
2866 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
2867{
2868 dbuf_prefetch_arg_t *dpa = private;
2869
2870 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2871 ASSERT3S(dpa->dpa_curlevel, >, 0);
d3c2ae1c 2872
2873 if (abuf == NULL) {
2874 ASSERT(zio == NULL || zio->io_error != 0);
2875 kmem_free(dpa, sizeof (*dpa));
2876 return;
2877 }
2878 ASSERT(zio == NULL || zio->io_error == 0);
2879
2880 /*
2881 * The dpa_dnode is only valid if we are called with a NULL
2882 * zio. This indicates that the arc_read() returned without
2883 * first calling zio_read() to issue a physical read. Once
2884 * a physical read is made the dpa_dnode must be invalidated
2885 * as the locks guarding it may have been dropped. If the
2886 * dpa_dnode is still valid, then we want to add it to the dbuf
2887 * cache. To do so, we must hold the dbuf associated with the block
2888 * we just prefetched, read its contents so that we associate it
2889 * with an arc_buf_t, and then release it.
2890 */
2891 if (zio != NULL) {
2892 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
b5256303 2893 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
2894 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
2895 } else {
2896 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2897 }
fcff0f35 2898 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2899
2900 dpa->dpa_dnode = NULL;
2901 } else if (dpa->dpa_dnode != NULL) {
2902 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
2903 (dpa->dpa_epbs * (dpa->dpa_curlevel -
2904 dpa->dpa_zb.zb_level));
2905 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
2906 dpa->dpa_curlevel, curblkid, FTAG);
2907 if (db == NULL) {
2908 kmem_free(dpa, sizeof (*dpa));
2909 arc_buf_destroy(abuf, private);
2910 return;
2911 }
2912
2913 (void) dbuf_read(db, NULL,
2914 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
2915 dbuf_rele(db, FTAG);
2916 }
2917
d4a72f23 2918 dpa->dpa_curlevel--;
1c27024e 2919 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
fcff0f35 2920 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
1c27024e 2921 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
fcff0f35 2922 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2923
2924 if (BP_IS_HOLE(bp)) {
2925 kmem_free(dpa, sizeof (*dpa));
2926 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2927 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2928 dbuf_issue_final_prefetch(dpa, bp);
2929 kmem_free(dpa, sizeof (*dpa));
2930 } else {
2931 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2932 zbookmark_phys_t zb;
2933
7c351e31 2934 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
2935 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
2936 iter_aflags |= ARC_FLAG_L2CACHE;
2937
2938 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2939
2940 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2941 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2942
2943 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2944 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2945 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2946 &iter_aflags, &zb);
2947 }
2948
2949 arc_buf_destroy(abuf, private);
2950}
2951
2952/*
2953 * Issue prefetch reads for the given block on the given level. If the indirect
2954 * blocks above that block are not in memory, we will read them in
2955 * asynchronously. As a result, this call never blocks waiting for a read to
2956 * complete. Note that the prefetch might fail if the dataset is encrypted and
2957 * the encryption key is unmapped before the IO completes.
fcff0f35 2958 */
34dc7c2f 2959void
2960dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2961 arc_flags_t aflags)
34dc7c2f 2962{
2963 blkptr_t bp;
2964 int epbs, nlevels, curlevel;
2965 uint64_t curblkid;
34dc7c2f 2966
428870ff 2967 ASSERT(blkid != DMU_BONUS_BLKID);
2968 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2969
2970 if (blkid > dn->dn_maxblkid)
2971 return;
2972
2973 if (dnode_block_freed(dn, blkid))
2974 return;
2975
2976 /*
2977 * This dnode hasn't been written to disk yet, so there's nothing to
2978 * prefetch.
2979 */
2980 nlevels = dn->dn_phys->dn_nlevels;
2981 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2982 return;
2983
2984 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2985 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2986 return;
2987
1c27024e 2988 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2989 level, blkid);
2990 if (db != NULL) {
2991 mutex_exit(&db->db_mtx);
572e2857 2992 /*
2993 * This dbuf already exists. It is either CACHED, or
2994 * (we assume) about to be read or filled.
572e2857 2995 */
572e2857 2996 return;
2997 }
2998
2999 /*
3000 * Find the closest ancestor (indirect block) of the target block
3001 * that is present in the cache. In this indirect block, we will
3002 * find the bp that is at curlevel, curblkid.
3003 */
3004 curlevel = level;
3005 curblkid = blkid;
3006 while (curlevel < nlevels - 1) {
3007 int parent_level = curlevel + 1;
3008 uint64_t parent_blkid = curblkid >> epbs;
3009 dmu_buf_impl_t *db;
3010
3011 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3012 FALSE, TRUE, FTAG, &db) == 0) {
3013 blkptr_t *bpp = db->db_buf->b_data;
3014 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3015 dbuf_rele(db, FTAG);
3016 break;
3017 }
428870ff 3018
3019 curlevel = parent_level;
3020 curblkid = parent_blkid;
3021 }
34dc7c2f 3022
3023 if (curlevel == nlevels - 1) {
3024 /* No cached indirect blocks found. */
3025 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3026 bp = dn->dn_phys->dn_blkptr[curblkid];
34dc7c2f 3027 }
3028 if (BP_IS_HOLE(&bp))
3029 return;
3030
3031 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3032
1c27024e 3033 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3034 ZIO_FLAG_CANFAIL);
3035
3036 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3037 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3038 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3039 dn->dn_object, level, blkid);
3040 dpa->dpa_curlevel = curlevel;
3041 dpa->dpa_prio = prio;
3042 dpa->dpa_aflags = aflags;
3043 dpa->dpa_spa = dn->dn_objset->os_spa;
d3c2ae1c 3044 dpa->dpa_dnode = dn;
3045 dpa->dpa_epbs = epbs;
3046 dpa->dpa_zio = pio;
3047
7c351e31 3048 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3049 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3050 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3051
3052 /*
3053 * If we have the indirect just above us, no need to do the asynchronous
3054 * prefetch chain; we'll just run the last step ourselves. If we're at
3055 * a higher level, though, we want to issue the prefetches for all the
3056 * indirect blocks asynchronously, so we can go on with whatever we were
3057 * doing.
3058 */
3059 if (curlevel == level) {
3060 ASSERT3U(curblkid, ==, blkid);
3061 dbuf_issue_final_prefetch(dpa, &bp);
3062 kmem_free(dpa, sizeof (*dpa));
3063 } else {
3064 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3065 zbookmark_phys_t zb;
3066
7c351e31 3067 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3068 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3069 iter_aflags |= ARC_FLAG_L2CACHE;
3070
3071 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3072 dn->dn_object, curlevel, curblkid);
3073 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3074 &bp, dbuf_prefetch_indirect_done, dpa, prio,
3075 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3076 &iter_aflags, &zb);
3077 }
3078 /*
3079 * We use pio here instead of dpa_zio since it's possible that
3080 * dpa may have already been freed.
3081 */
3082 zio_nowait(pio);
3083}
3084
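/*
 * Editorial example, not part of the original source: a typical
 * dbuf_prefetch() call site. The caller holds dn_struct_rwlock, as asserted
 * above, and the call returns immediately even when indirect blocks must be
 * read first, since the chain completes in dbuf_prefetch_indirect_done().
 * The helper name is hypothetical.
 */
static void
dnode_prefetch_blkid_example(dnode_t *dn, uint64_t blkid)
{
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
            ARC_FLAG_PREDICTIVE_PREFETCH);
        rw_exit(&dn->dn_struct_rwlock);
}
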
d1d7e268 3085#define DBUF_HOLD_IMPL_MAX_DEPTH 20
fc5bb51f 3086
71a24c3c 3087/*
adb726eb 3088 * Helper function for dbuf_hold_impl_arg() to copy a buffer. Handles
3089 * the case of encrypted, compressed and uncompressed buffers by
3090 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3091 * arc_alloc_compressed_buf() or arc_alloc_buf().
3092 *
adb726eb 3093 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl_arg().
3094 */
3095noinline static void
adb726eb 3096dbuf_hold_copy(struct dbuf_hold_arg *dh)
3097{
3098 dnode_t *dn = dh->dh_dn;
3099 dmu_buf_impl_t *db = dh->dh_db;
3100 dbuf_dirty_record_t *dr = dh->dh_dr;
3101 arc_buf_t *data = dr->dt.dl.dr_data;
3102
3103 enum zio_compress compress_type = arc_get_compression(data);
3104
3105 if (arc_is_encrypted(data)) {
3106 boolean_t byteorder;
3107 uint8_t salt[ZIO_DATA_SALT_LEN];
3108 uint8_t iv[ZIO_DATA_IV_LEN];
3109 uint8_t mac[ZIO_DATA_MAC_LEN];
3110
3111 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3112 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3113 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3114 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3115 compress_type));
3116 } else if (compress_type != ZIO_COMPRESS_OFF) {
3117 dbuf_set_data(db, arc_alloc_compressed_buf(
3118 dn->dn_objset->os_spa, db, arc_buf_size(data),
3119 arc_buf_lsize(data), compress_type));
3120 } else {
3121 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3122 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3123 }
3124
3125 bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
3126}
3127
3128/*
3129 * Returns with db_holds incremented, and db_mtx not held.
3130 * Note: dn_struct_rwlock must be held.
3131 */
fc5bb51f 3132static int
adb726eb 3133dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
34dc7c2f 3134{
fc5bb51f 3135 dh->dh_parent = NULL;
34dc7c2f 3136
3137 ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
3138 ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
3139 ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
34dc7c2f 3140
fc5bb51f 3141 *(dh->dh_dbp) = NULL;
d3c2ae1c 3142
34dc7c2f 3143 /* dbuf_find() returns with db_mtx held */
3144 dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object,
3145 dh->dh_level, dh->dh_blkid);
3146
3147 if (dh->dh_db == NULL) {
3148 dh->dh_bp = NULL;
3149
3150 if (dh->dh_fail_uncached)
3151 return (SET_ERROR(ENOENT));
3152
3153 ASSERT3P(dh->dh_parent, ==, NULL);
3154 dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
adb726eb 3155 dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp);
fc5bb51f 3156 if (dh->dh_fail_sparse) {
3157 if (dh->dh_err == 0 &&
3158 dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
2e528b49 3159 dh->dh_err = SET_ERROR(ENOENT);
3160 if (dh->dh_err) {
3161 if (dh->dh_parent)
3162 dbuf_rele(dh->dh_parent, NULL);
3163 return (dh->dh_err);
3164 }
3165 }
3166 if (dh->dh_err && dh->dh_err != ENOENT)
3167 return (dh->dh_err);
3168 dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
02730c33 3169 dh->dh_parent, dh->dh_bp);
3170 }
3171
3172 if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
3173 mutex_exit(&dh->dh_db->db_mtx);
3174 return (SET_ERROR(ENOENT));
3175 }
3176
3177 if (dh->dh_db->db_buf != NULL) {
3178 arc_buf_access(dh->dh_db->db_buf);
fc5bb51f 3179 ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
0873bb63 3180 }
34dc7c2f 3181
fc5bb51f 3182 ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
3183
3184 /*
3185 * If this buffer is currently syncing out, and we are
3186 * still referencing it from db_data, we need to make a copy
3187 * of it in case we decide we want to dirty it again in this txg.
3188 */
fc5bb51f
BB
3189 if (dh->dh_db->db_level == 0 &&
3190 dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
3191 dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
3192 dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
3193 dh->dh_dr = dh->dh_db->db_data_pending;
71a24c3c
TC
3194 if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf)
3195 dbuf_hold_copy(dh);
34dc7c2f
BB
3196 }
3197
d3c2ae1c 3198 if (multilist_link_active(&dh->dh_db->db_cache_link)) {
424fd7c3 3199 ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
2e5dc449
MA
3200 ASSERT(dh->dh_db->db_caching_status == DB_DBUF_CACHE ||
3201 dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE);
3202
3203 multilist_remove(
3204 dbuf_caches[dh->dh_db->db_caching_status].cache,
3205 dh->dh_db);
424fd7c3 3206 (void) zfs_refcount_remove_many(
2e5dc449 3207 &dbuf_caches[dh->dh_db->db_caching_status].size,
d3c2ae1c 3208 dh->dh_db->db.db_size, dh->dh_db);
2e5dc449
MA
3209
3210 if (dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3211 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3212 } else {
3213 DBUF_STAT_BUMPDOWN(cache_levels[dh->dh_db->db_level]);
3214 DBUF_STAT_BUMPDOWN(cache_count);
3215 DBUF_STAT_DECR(cache_levels_bytes[dh->dh_db->db_level],
3216 dh->dh_db->db.db_size);
3217 }
3218 dh->dh_db->db_caching_status = DB_NO_CACHE;
d3c2ae1c 3219 }
c13060e4 3220 (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
fc5bb51f
BB
3221 DBUF_VERIFY(dh->dh_db);
3222 mutex_exit(&dh->dh_db->db_mtx);
34dc7c2f
BB
3223
3224 /* NOTE: we can't rele the parent until after we drop the db_mtx */
fc5bb51f
BB
3225 if (dh->dh_parent)
3226 dbuf_rele(dh->dh_parent, NULL);
34dc7c2f 3227
fc5bb51f
BB
3228 ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
3229 ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
3230 ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
3231 *(dh->dh_dbp) = dh->dh_db;
34dc7c2f
BB
3232
3233 return (0);
3234}
3235
fc5bb51f 3236/*
adb726eb
MA
3237 * dbuf_hold_impl_arg() is called recursively, via dbuf_findbp(). There can
3238 * be as many recursive calls as there are levels of on-disk indirect blocks,
3239 * but typically only 0-2 recursive calls. To minimize the stack frame size,
3240 * the recursive function's arguments and "local variables" are allocated on
3241 * the heap as the dbuf_hold_arg_t.
fc5bb51f
BB
3242 */
3243int
fcff0f35
PD
3244dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3245 boolean_t fail_sparse, boolean_t fail_uncached,
fc5bb51f
BB
3246 void *tag, dmu_buf_impl_t **dbp)
3247{
adb726eb
MA
3248 dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level, blkid,
3249 fail_sparse, fail_uncached, tag, dbp);
fc5bb51f 3250
adb726eb 3251 int error = dbuf_hold_impl_arg(dh);
fc5bb51f 3252
adb726eb 3253 dbuf_hold_arg_destroy(dh);
fc5bb51f
BB
3254
3255 return (error);
3256}
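/*
 * Illustrative sketch (not part of this file): the heap-allocated
 * argument pattern above, in miniature. Every name here (walk_arg_t,
 * walk(), walk_impl()) is hypothetical. Each recursion level costs a
 * small heap allocation instead of a large stack frame, which is the
 * same trade dbuf_hold_impl() makes via dbuf_hold_arg_create().
 */
typedef struct walk_arg {
	uint64_t wa_blkid;
	int wa_level;
	char wa_scratch[512];	/* state that would otherwise sit on the stack */
} walk_arg_t;

static int walk(uint64_t blkid, int level);

static int
walk_impl(walk_arg_t *wa)
{
	if (wa->wa_level == 0)
		return (0);
	/* recurse with a fresh heap-allocated frame, as dbuf_findbp() does */
	return (walk(wa->wa_blkid >> 1, wa->wa_level - 1));
}

static int
walk(uint64_t blkid, int level)
{
	walk_arg_t *wa = kmem_alloc(sizeof (*wa), KM_SLEEP);
	int err;

	wa->wa_blkid = blkid;
	wa->wa_level = level;
	err = walk_impl(wa);
	kmem_free(wa, sizeof (*wa));
	return (err);
}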
3257
adb726eb
MA
3258static dbuf_hold_arg_t *
3259dbuf_hold_arg_create(dnode_t *dn, uint8_t level, uint64_t blkid,
4ea3f864 3260 boolean_t fail_sparse, boolean_t fail_uncached,
adb726eb 3261 void *tag, dmu_buf_impl_t **dbp)
fc5bb51f 3262{
adb726eb 3263 dbuf_hold_arg_t *dh = kmem_alloc(sizeof (*dh), KM_SLEEP);
fc5bb51f
BB
3264 dh->dh_dn = dn;
3265 dh->dh_level = level;
3266 dh->dh_blkid = blkid;
fcff0f35 3267
fc5bb51f 3268 dh->dh_fail_sparse = fail_sparse;
fcff0f35
PD
3269 dh->dh_fail_uncached = fail_uncached;
3270
fc5bb51f
BB
3271 dh->dh_tag = tag;
3272 dh->dh_dbp = dbp;
d9eea113
MA
3273
3274 dh->dh_db = NULL;
3275 dh->dh_parent = NULL;
3276 dh->dh_bp = NULL;
3277 dh->dh_err = 0;
3278 dh->dh_dr = NULL;
d9eea113 3279
adb726eb
MA
3280 return (dh);
3281}
3282
3283static void
3284dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh)
3285{
3286 kmem_free(dh, sizeof (*dh));
fc5bb51f
BB
3287}
3288
34dc7c2f
BB
3289dmu_buf_impl_t *
3290dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3291{
fcff0f35 3292 return (dbuf_hold_level(dn, 0, blkid, tag));
34dc7c2f
BB
3293}
3294
3295dmu_buf_impl_t *
3296dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3297{
3298 dmu_buf_impl_t *db;
fcff0f35 3299 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
34dc7c2f
BB
3300 return (err ? NULL : db);
3301}
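/*
 * Hedged usage sketch (not part of this file): the canonical
 * hold/release pairing. dbuf_hold_impl() asserts that dn_struct_rwlock
 * is held, and a hold taken under FTAG must be dropped with the same
 * tag. example_touch_block() is a hypothetical caller.
 */
static int
example_touch_block(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));

	/* ... dbuf_read(db, NULL, DB_RF_MUST_SUCCEED) and use db_data ... */

	dbuf_rele(db, FTAG);
	return (0);
}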
3302
3303void
3304dbuf_create_bonus(dnode_t *dn)
3305{
3306 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3307
3308 ASSERT(dn->dn_bonus == NULL);
428870ff
BB
3309 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3310}
3311
3312int
3313dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3314{
3315 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
572e2857
BB
3316 dnode_t *dn;
3317
428870ff 3318 if (db->db_blkid != DMU_SPILL_BLKID)
2e528b49 3319 return (SET_ERROR(ENOTSUP));
428870ff
BB
3320 if (blksz == 0)
3321 blksz = SPA_MINBLOCKSIZE;
f1512ee6
MA
3322 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3323 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
428870ff 3324
572e2857
BB
3325 DB_DNODE_ENTER(db);
3326 dn = DB_DNODE(db);
3327 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
428870ff 3328 dbuf_new_size(db, blksz, tx);
572e2857
BB
3329 rw_exit(&dn->dn_struct_rwlock);
3330 DB_DNODE_EXIT(db);
428870ff
BB
3331
3332 return (0);
3333}
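/*
 * Worked example (sketch): the rounding above is to SPA_MINBLOCKSIZE
 * (512 bytes), so a caller asking for a 1000-byte spill block gets
 * 1024 bytes, and a zero-byte request is first bumped to 512:
 *
 *	P2ROUNDUP(1000, 512) == 1024
 *	P2ROUNDUP(512, 512) == 512
 */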
3334
3335void
3336dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3337{
3338 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
34dc7c2f
BB
3339}
3340
3341#pragma weak dmu_buf_add_ref = dbuf_add_ref
3342void
3343dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3344{
c13060e4 3345 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
d3c2ae1c 3346 VERIFY3S(holds, >, 1);
34dc7c2f
BB
3347}
3348
6ebebace
JG
3349#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3350boolean_t
3351dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3352 void *tag)
3353{
3354 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3355 dmu_buf_impl_t *found_db;
3356 boolean_t result = B_FALSE;
3357
d617648c 3358 if (blkid == DMU_BONUS_BLKID)
6ebebace
JG
3359 found_db = dbuf_find_bonus(os, obj);
3360 else
3361 found_db = dbuf_find(os, obj, 0, blkid);
3362
3363 if (found_db != NULL) {
3364 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
c13060e4 3365 (void) zfs_refcount_add(&db->db_holds, tag);
6ebebace
JG
3366 result = B_TRUE;
3367 }
d617648c 3368 mutex_exit(&found_db->db_mtx);
6ebebace
JG
3369 }
3370 return (result);
3371}
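/*
 * Hedged usage sketch (not part of this file): a caller that reached a
 * dmu_buf_t through a lockless lookup can use dmu_buf_try_add_ref() to
 * confirm it still names the live dbuf for (os, obj, blkid) before
 * trusting it. example_revalidate() is a hypothetical caller.
 */
static boolean_t
example_revalidate(dmu_buf_t *db, objset_t *os, uint64_t obj,
    uint64_t blkid, void *tag)
{
	if (!dmu_buf_try_add_ref(db, os, obj, blkid, tag))
		return (B_FALSE);	/* dbuf was evicted or replaced */

	/* ... db is safe to use until the matching release ... */

	dmu_buf_rele(db, tag);
	return (B_TRUE);
}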
3372
572e2857
BB
3373/*
3374 * If you call dbuf_rele() you had better not be referencing the dnode handle
3375 * unless you have some other direct or indirect hold on the dnode. (An indirect
3376 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3377 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3378 * dnode's parent dbuf evicting its dnode handles.
3379 */
34dc7c2f
BB
3380void
3381dbuf_rele(dmu_buf_impl_t *db, void *tag)
428870ff
BB
3382{
3383 mutex_enter(&db->db_mtx);
3d503a76 3384 dbuf_rele_and_unlock(db, tag, B_FALSE);
428870ff
BB
3385}
3386
b0bc7a84
MG
3387void
3388dmu_buf_rele(dmu_buf_t *db, void *tag)
3389{
3390 dbuf_rele((dmu_buf_impl_t *)db, tag);
3391}
3392
428870ff
BB
3393/*
3394 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
1fac63e5
MA
3395 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3396 * argument should be set if we are already in the dbuf-evicting code
3397 * path, in which case we don't want to recursively evict. This allows us to
3398 * avoid deeply nested stacks that would have a call flow similar to this:
3399 *
3400 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3401 * ^ |
3402 * | |
3403 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3404 *
428870ff
BB
3405 */
3406void
3d503a76 3407dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
34dc7c2f
BB
3408{
3409 int64_t holds;
3410
428870ff 3411 ASSERT(MUTEX_HELD(&db->db_mtx));
34dc7c2f
BB
3412 DBUF_VERIFY(db);
3413
572e2857
BB
3414 /*
3415 * Remove the reference to the dbuf before removing its hold on the
3416 * dnode so we can guarantee in dnode_move() that a referenced bonus
3417 * buffer has a corresponding dnode hold.
3418 */
424fd7c3 3419 holds = zfs_refcount_remove(&db->db_holds, tag);
34dc7c2f
BB
3420 ASSERT(holds >= 0);
3421
3422 /*
3423 * We can't freeze indirects if there is a possibility that they
3424 * may be modified in the current syncing context.
3425 */
d3c2ae1c
GW
3426 if (db->db_buf != NULL &&
3427 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
34dc7c2f 3428 arc_buf_freeze(db->db_buf);
d3c2ae1c 3429 }
34dc7c2f
BB
3430
3431 if (holds == db->db_dirtycnt &&
bc4501f7 3432 db->db_level == 0 && db->db_user_immediate_evict)
34dc7c2f
BB
3433 dbuf_evict_user(db);
3434
3435 if (holds == 0) {
428870ff 3436 if (db->db_blkid == DMU_BONUS_BLKID) {
4c7b7eed 3437 dnode_t *dn;
bc4501f7 3438 boolean_t evict_dbuf = db->db_pending_evict;
572e2857
BB
3439
3440 /*
4c7b7eed
JG
3441 * If the dnode moves here, we cannot cross this
3442 * barrier until the move completes.
572e2857
BB
3443 */
3444 DB_DNODE_ENTER(db);
4c7b7eed
JG
3445
3446 dn = DB_DNODE(db);
3447 atomic_dec_32(&dn->dn_dbufs_count);
3448
3449 /*
3450 * Decrementing the dbuf count means that the bonus
3451 * buffer's dnode hold is no longer discounted in
3452 * dnode_move(). The dnode cannot move until after
bc4501f7 3453 * the dnode_rele() below.
4c7b7eed 3454 */
572e2857 3455 DB_DNODE_EXIT(db);
4c7b7eed
JG
3456
3457 /*
3458 * Do not reference db after its lock is dropped.
3459 * Another thread may evict it.
3460 */
3461 mutex_exit(&db->db_mtx);
3462
bc4501f7 3463 if (evict_dbuf)
4c7b7eed 3464 dnode_evict_bonus(dn);
bc4501f7
JG
3465
3466 dnode_rele(dn, db);
34dc7c2f
BB
3467 } else if (db->db_buf == NULL) {
3468 /*
3469 * This is a special case: we never associated this
3470 * dbuf with any data allocated from the ARC.
3471 */
b128c09f
BB
3472 ASSERT(db->db_state == DB_UNCACHED ||
3473 db->db_state == DB_NOFILL);
d3c2ae1c 3474 dbuf_destroy(db);
34dc7c2f 3475 } else if (arc_released(db->db_buf)) {
34dc7c2f
BB
3476 /*
3477 * This dbuf has anonymous data associated with it.
3478 */
d3c2ae1c 3479 dbuf_destroy(db);
34dc7c2f 3480 } else {
d3c2ae1c
GW
3481 boolean_t do_arc_evict = B_FALSE;
3482 blkptr_t bp;
3483 spa_t *spa = dmu_objset_spa(db->db_objset);
3484
3485 if (!DBUF_IS_CACHEABLE(db) &&
3486 db->db_blkptr != NULL &&
3487 !BP_IS_HOLE(db->db_blkptr) &&
3488 !BP_IS_EMBEDDED(db->db_blkptr)) {
3489 do_arc_evict = B_TRUE;
3490 bp = *db->db_blkptr;
3491 }
1eb5bfa3 3492
d3c2ae1c
GW
3493 if (!DBUF_IS_CACHEABLE(db) ||
3494 db->db_pending_evict) {
3495 dbuf_destroy(db);
3496 } else if (!multilist_link_active(&db->db_cache_link)) {
2e5dc449
MA
3497 ASSERT3U(db->db_caching_status, ==,
3498 DB_NO_CACHE);
3499
3500 dbuf_cached_state_t dcs =
3501 dbuf_include_in_metadata_cache(db) ?
3502 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3503 db->db_caching_status = dcs;
3504
3505 multilist_insert(dbuf_caches[dcs].cache, db);
424fd7c3
TS
3506 (void) zfs_refcount_add_many(
3507 &dbuf_caches[dcs].size,
d3c2ae1c 3508 db->db.db_size, db);
2e5dc449
MA
3509
3510 if (dcs == DB_DBUF_METADATA_CACHE) {
3511 DBUF_STAT_BUMP(metadata_cache_count);
3512 DBUF_STAT_MAX(
3513 metadata_cache_size_bytes_max,
424fd7c3 3514 zfs_refcount_count(
2e5dc449
MA
3515 &dbuf_caches[dcs].size));
3516 } else {
3517 DBUF_STAT_BUMP(
3518 cache_levels[db->db_level]);
3519 DBUF_STAT_BUMP(cache_count);
3520 DBUF_STAT_INCR(
3521 cache_levels_bytes[db->db_level],
3522 db->db.db_size);
3523 DBUF_STAT_MAX(cache_size_bytes_max,
424fd7c3 3524 zfs_refcount_count(
2e5dc449
MA
3525 &dbuf_caches[dcs].size));
3526 }
b128c09f 3527 mutex_exit(&db->db_mtx);
d3c2ae1c 3528
3d503a76
GW
3529 if (db->db_caching_status == DB_DBUF_CACHE &&
3530 !evicting) {
1fac63e5 3531 dbuf_evict_notify();
2e5dc449 3532 }
bd089c54 3533 }
d3c2ae1c
GW
3534
3535 if (do_arc_evict)
3536 arc_freed(spa, &bp);
34dc7c2f
BB
3537 }
3538 } else {
3539 mutex_exit(&db->db_mtx);
3540 }
d3c2ae1c 3541
34dc7c2f
BB
3542}
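/*
 * Minimal sketch (not part of this file) of the recursion break that
 * the 'evicting' flag provides above; release_one() and evict_one()
 * are hypothetical stand-ins for dbuf_rele_and_unlock() and
 * dbuf_evict_one().
 */
static void evict_one(void);

static void
release_one(boolean_t evicting)
{
	/* ... drop the hold, maybe insert into the dbuf cache ... */
	if (!evicting)
		dbuf_evict_notify();	/* may end up calling evict_one() */
}

static void
evict_one(void)
{
	/* destroying a dbuf releases it, but must not re-enter eviction */
	release_one(B_TRUE);
}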
3543
3544#pragma weak dmu_buf_refcount = dbuf_refcount
3545uint64_t
3546dbuf_refcount(dmu_buf_impl_t *db)
3547{
424fd7c3 3548 return (zfs_refcount_count(&db->db_holds));
34dc7c2f
BB
3549}
3550
cd32e5db
TC
3551uint64_t
3552dmu_buf_user_refcount(dmu_buf_t *db_fake)
3553{
3554 uint64_t holds;
3555 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3556
3557 mutex_enter(&db->db_mtx);
424fd7c3
TS
3558 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3559 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
cd32e5db
TC
3560 mutex_exit(&db->db_mtx);
3561
3562 return (holds);
3563}
3564
34dc7c2f 3565void *
0c66c32d
JG
3566dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3567 dmu_buf_user_t *new_user)
34dc7c2f 3568{
0c66c32d
JG
3569 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3570
3571 mutex_enter(&db->db_mtx);
3572 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3573 if (db->db_user == old_user)
3574 db->db_user = new_user;
3575 else
3576 old_user = db->db_user;
3577 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3578 mutex_exit(&db->db_mtx);
3579
3580 return (old_user);
34dc7c2f
BB
3581}
3582
3583void *
0c66c32d 3584dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
34dc7c2f 3585{
0c66c32d 3586 return (dmu_buf_replace_user(db_fake, NULL, user));
34dc7c2f
BB
3587}
3588
3589void *
0c66c32d 3590dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
34dc7c2f
BB
3591{
3592 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
34dc7c2f 3593
bc4501f7 3594 db->db_user_immediate_evict = TRUE;
0c66c32d
JG
3595 return (dmu_buf_set_user(db_fake, user));
3596}
34dc7c2f 3597
0c66c32d
JG
3598void *
3599dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3600{
3601 return (dmu_buf_replace_user(db_fake, user, NULL));
34dc7c2f
BB
3602}
3603
3604void *
3605dmu_buf_get_user(dmu_buf_t *db_fake)
3606{
3607 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
34dc7c2f 3608
0c66c32d
JG
3609 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3610 return (db->db_user);
3611}
3612
3613void
3614dmu_buf_user_evict_wait()
3615{
3616 taskq_wait(dbu_evict_taskq);
34dc7c2f
BB
3617}
3618
03c6040b
GW
3619blkptr_t *
3620dmu_buf_get_blkptr(dmu_buf_t *db)
3621{
3622 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3623 return (dbi->db_blkptr);
3624}
3625
8bea9815
MA
3626objset_t *
3627dmu_buf_get_objset(dmu_buf_t *db)
3628{
3629 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3630 return (dbi->db_objset);
3631}
3632
2bce8049
MA
3633dnode_t *
3634dmu_buf_dnode_enter(dmu_buf_t *db)
3635{
3636 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3637 DB_DNODE_ENTER(dbi);
3638 return (DB_DNODE(dbi));
3639}
3640
3641void
3642dmu_buf_dnode_exit(dmu_buf_t *db)
3643{
3644 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3645 DB_DNODE_EXIT(dbi);
3646}
3647
34dc7c2f
BB
3648static void
3649dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3650{
3651 /* ASSERT(dmu_tx_is_syncing(tx) */
3652 ASSERT(MUTEX_HELD(&db->db_mtx));
3653
3654 if (db->db_blkptr != NULL)
3655 return;
3656
428870ff 3657 if (db->db_blkid == DMU_SPILL_BLKID) {
50c957f7 3658 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
428870ff
BB
3659 BP_ZERO(db->db_blkptr);
3660 return;
3661 }
34dc7c2f
BB
3662 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3663 /*
3664 * This buffer was allocated at a time when there were
3665 * no available blkptrs from the dnode, or it was
3666 * inappropriate to hook it in (i.e., nlevels mis-match).
3667 */
3668 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3669 ASSERT(db->db_parent == NULL);
3670 db->db_parent = dn->dn_dbuf;
3671 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3672 DBUF_VERIFY(db);
3673 } else {
3674 dmu_buf_impl_t *parent = db->db_parent;
3675 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3676
3677 ASSERT(dn->dn_phys->dn_nlevels > 1);
3678 if (parent == NULL) {
3679 mutex_exit(&db->db_mtx);
3680 rw_enter(&dn->dn_struct_rwlock, RW_READER);
fcff0f35
PD
3681 parent = dbuf_hold_level(dn, db->db_level + 1,
3682 db->db_blkid >> epbs, db);
34dc7c2f
BB
3683 rw_exit(&dn->dn_struct_rwlock);
3684 mutex_enter(&db->db_mtx);
3685 db->db_parent = parent;
3686 }
3687 db->db_blkptr = (blkptr_t *)parent->db.db_data +
3688 (db->db_blkid & ((1ULL << epbs) - 1));
3689 DBUF_VERIFY(db);
3690 }
3691}
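/*
 * Worked example (sketch): with 128K indirect blocks,
 * dn_indblkshift = 17 and SPA_BLKPTRSHIFT = 7, so epbs = 10 and each
 * indirect block maps 1 << 10 = 1024 children. A level-0 block with
 * blkid 3000 therefore hangs off level-1 block 3000 >> 10 = 2, at
 * slot 3000 & 1023 = 952 of that block's blkptr array.
 */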
3692
b5256303 3693/*
0c03d21a
MA
3694 * When syncing out a block of dnodes, adjust the block to deal with
3695 * encryption. Normally, we make sure the block is decrypted before writing
3696 * it. If we have crypt params, then we are writing a raw (encrypted) block
3697 * from a raw receive. In this case, set the ARC buf's crypt params so
3698 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
b5256303
TC
3699 */
3700static void
0c03d21a 3701dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
b5256303
TC
3702{
3703 int err;
3704 dmu_buf_impl_t *db = dr->dr_dbuf;
3705
3706 ASSERT(MUTEX_HELD(&db->db_mtx));
0c03d21a
MA
3707 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
3708 ASSERT3U(db->db_level, ==, 0);
b5256303 3709
0c03d21a 3710 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
a2c2ed1b
TC
3711 zbookmark_phys_t zb;
3712
b5256303
TC
3713 /*
3714 * Unfortunately, there is currently no mechanism for
3715 * syncing context to handle decryption errors. An error
3716 * here is only possible if an attacker maliciously
3717 * changed a dnode block and updated the associated
3718 * checksums going up the block tree.
3719 */
a2c2ed1b
TC
3720 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
3721 db->db.db_object, db->db_level, db->db_blkid);
b5256303 3722 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
a2c2ed1b 3723 &zb, B_TRUE);
b5256303
TC
3724 if (err)
3725 panic("Invalid dnode block MAC");
0c03d21a
MA
3726 } else if (dr->dt.dl.dr_has_raw_params) {
3727 (void) arc_release(dr->dt.dl.dr_data, db);
3728 arc_convert_to_raw(dr->dt.dl.dr_data,
3729 dmu_objset_id(db->db_objset),
3730 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
3731 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
b5256303
TC
3732 }
3733}
3734
d1d7e268
MK
3735/*
3736 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
60948de1
BB
3737 * is critical that we not allow the compiler to inline this function into
3738 * dbuf_sync_list() thereby drastically bloating the stack usage.
3739 */
3740noinline static void
34dc7c2f
BB
3741dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3742{
3743 dmu_buf_impl_t *db = dr->dr_dbuf;
572e2857 3744 dnode_t *dn;
34dc7c2f
BB
3745 zio_t *zio;
3746
3747 ASSERT(dmu_tx_is_syncing(tx));
3748
3749 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
3750
3751 mutex_enter(&db->db_mtx);
3752
3753 ASSERT(db->db_level > 0);
3754 DBUF_VERIFY(db);
3755
e49f1e20 3756 /* Read the block if it hasn't been read yet. */
34dc7c2f
BB
3757 if (db->db_buf == NULL) {
3758 mutex_exit(&db->db_mtx);
3759 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
3760 mutex_enter(&db->db_mtx);
3761 }
3762 ASSERT3U(db->db_state, ==, DB_CACHED);
34dc7c2f
BB
3763 ASSERT(db->db_buf != NULL);
3764
572e2857
BB
3765 DB_DNODE_ENTER(db);
3766 dn = DB_DNODE(db);
e49f1e20 3767 /* Indirect block size must match what the dnode thinks it is. */
572e2857 3768 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
34dc7c2f 3769 dbuf_check_blkptr(dn, db);
572e2857 3770 DB_DNODE_EXIT(db);
34dc7c2f 3771
e49f1e20 3772 /* Provide the pending dirty record to child dbufs */
34dc7c2f
BB
3773 db->db_data_pending = dr;
3774
34dc7c2f 3775 mutex_exit(&db->db_mtx);
a1d477c2 3776
b128c09f 3777 dbuf_write(dr, db->db_buf, tx);
34dc7c2f
BB
3778
3779 zio = dr->dr_zio;
3780 mutex_enter(&dr->dt.di.dr_mtx);
4bda3bd0 3781 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
34dc7c2f
BB
3782 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3783 mutex_exit(&dr->dt.di.dr_mtx);
3784 zio_nowait(zio);
3785}
3786
d1d7e268
MK
3787/*
3788 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
60948de1
BB
3789 * critical that we not allow the compiler to inline this function into
3790 * dbuf_sync_list() thereby drastically bloating the stack usage.
3791 */
3792noinline static void
34dc7c2f
BB
3793dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3794{
3795 arc_buf_t **datap = &dr->dt.dl.dr_data;
3796 dmu_buf_impl_t *db = dr->dr_dbuf;
572e2857
BB
3797 dnode_t *dn;
3798 objset_t *os;
34dc7c2f 3799 uint64_t txg = tx->tx_txg;
34dc7c2f
BB
3800
3801 ASSERT(dmu_tx_is_syncing(tx));
3802
3803 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
3804
3805 mutex_enter(&db->db_mtx);
3806 /*
3807 * To be synced, we must be dirtied. But we
3808 * might have been freed after the dirty.
3809 */
3810 if (db->db_state == DB_UNCACHED) {
3811 /* This buffer has been freed since it was dirtied */
3812 ASSERT(db->db.db_data == NULL);
3813 } else if (db->db_state == DB_FILL) {
3814 /* This buffer was freed and is now being re-filled */
3815 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
3816 } else {
b128c09f 3817 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
34dc7c2f
BB
3818 }
3819 DBUF_VERIFY(db);
3820
572e2857
BB
3821 DB_DNODE_ENTER(db);
3822 dn = DB_DNODE(db);
3823
428870ff
BB
3824 if (db->db_blkid == DMU_SPILL_BLKID) {
3825 mutex_enter(&dn->dn_mtx);
81edd3e8
P
3826 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
3827 /*
3828 * In the previous transaction group, the bonus buffer
3829 * was entirely used to store the attributes for the
3830 * dnode which overrode the dn_spill field. However,
3831 * when adding more attributes to the file a spill
3832 * block was required to hold the extra attributes.
3833 *
3834 * Make sure to clear the garbage left in the dn_spill
3835 * field from the previous attributes in the bonus
3836 * buffer. Otherwise, after writing out the spill
3837 * block to the newly allocated dva, it will free
3838 * the old block pointed to by the invalid dn_spill.
3839 */
3840 db->db_blkptr = NULL;
3841 }
428870ff
BB
3842 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
3843 mutex_exit(&dn->dn_mtx);
3844 }
3845
34dc7c2f
BB
3846 /*
3847 * If this is a bonus buffer, simply copy the bonus data into the
3848 * dnode. It will be written out when the dnode is synced (and it
3849 * will be synced, since it must have been dirty for dbuf_sync to
3850 * be called).
3851 */
428870ff 3852 if (db->db_blkid == DMU_BONUS_BLKID) {
34dc7c2f
BB
3853 dbuf_dirty_record_t **drp;
3854
3855 ASSERT(*datap != NULL);
c99c9001 3856 ASSERT0(db->db_level);
b5256303 3857 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
50c957f7 3858 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
b5256303
TC
3859 bcopy(*datap, DN_BONUS(dn->dn_phys),
3860 DN_MAX_BONUS_LEN(dn->dn_phys));
572e2857
BB
3861 DB_DNODE_EXIT(db);
3862
34dc7c2f 3863 if (*datap != db->db.db_data) {
50c957f7
NB
3864 int slots = DB_DNODE(db)->dn_num_slots;
3865 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
a3fd9d9e 3866 kmem_free(*datap, bonuslen);
25458cbe 3867 arc_space_return(bonuslen, ARC_SPACE_BONUS);
34dc7c2f
BB
3868 }
3869 db->db_data_pending = NULL;
3870 drp = &db->db_last_dirty;
3871 while (*drp != dr)
3872 drp = &(*drp)->dr_next;
3873 ASSERT(dr->dr_next == NULL);
428870ff 3874 ASSERT(dr->dr_dbuf == db);
34dc7c2f 3875 *drp = dr->dr_next;
753972fc
BB
3876 if (dr->dr_dbuf->db_level != 0) {
3877 mutex_destroy(&dr->dt.di.dr_mtx);
3878 list_destroy(&dr->dt.di.dr_children);
3879 }
34dc7c2f
BB
3880 kmem_free(dr, sizeof (dbuf_dirty_record_t));
3881 ASSERT(db->db_dirtycnt > 0);
3882 db->db_dirtycnt -= 1;
3d503a76 3883 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
34dc7c2f
BB
3884 return;
3885 }
3886
572e2857
BB
3887 os = dn->dn_objset;
3888
34dc7c2f
BB
3889 /*
3890 * This function may have dropped the db_mtx lock allowing a dmu_sync
3891 * operation to sneak in. As a result, we need to ensure that we
3892 * don't check the dr_override_state until we have returned from
3893 * dbuf_check_blkptr.
3894 */
3895 dbuf_check_blkptr(dn, db);
3896
3897 /*
572e2857 3898 * If this buffer is in the middle of an immediate write,
34dc7c2f
BB
3899 * wait for the synchronous IO to complete.
3900 */
3901 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
3902 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
3903 cv_wait(&db->db_changed, &db->db_mtx);
3904 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
3905 }
3906
b5256303
TC
3907 /*
3908 * If this is a dnode block, ensure it is appropriately encrypted
3909 * or decrypted, depending on what we are writing to it this txg.
3910 */
3911 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
0c03d21a 3912 dbuf_prepare_encrypted_dnode_leaf(dr);
b5256303 3913
9babb374
BB
3914 if (db->db_state != DB_NOFILL &&
3915 dn->dn_object != DMU_META_DNODE_OBJECT &&
424fd7c3 3916 zfs_refcount_count(&db->db_holds) > 1 &&
428870ff 3917 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
9babb374
BB
3918 *datap == db->db_buf) {
3919 /*
3920 * If this buffer is currently "in use" (i.e., there
3921 * are active holds and db_data still references it),
3922 * then make a copy before we start the write so that
3923 * any modifications from the open txg will not leak
3924 * into this write.
3925 *
3926 * NOTE: this copy does not need to be made for
3927 * objects only modified in the syncing context (e.g.
3928 * meta-dnode blocks).
3929 */
2aa34383 3930 int psize = arc_buf_size(*datap);
b5256303 3931 int lsize = arc_buf_lsize(*datap);
9babb374 3932 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2aa34383
DK
3933 enum zio_compress compress_type = arc_get_compression(*datap);
3934
b5256303
TC
3935 if (arc_is_encrypted(*datap)) {
3936 boolean_t byteorder;
3937 uint8_t salt[ZIO_DATA_SALT_LEN];
3938 uint8_t iv[ZIO_DATA_IV_LEN];
3939 uint8_t mac[ZIO_DATA_MAC_LEN];
3940
3941 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
3942 *datap = arc_alloc_raw_buf(os->os_spa, db,
3943 dmu_objset_id(os), byteorder, salt, iv, mac,
3944 dn->dn_type, psize, lsize, compress_type);
3945 } else if (compress_type != ZIO_COMPRESS_OFF) {
2aa34383
DK
3946 ASSERT3U(type, ==, ARC_BUFC_DATA);
3947 *datap = arc_alloc_compressed_buf(os->os_spa, db,
3948 psize, lsize, compress_type);
b5256303
TC
3949 } else {
3950 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
2aa34383
DK
3951 }
3952 bcopy(db->db.db_data, (*datap)->b_data, psize);
b128c09f 3953 }
34dc7c2f
BB
3954 db->db_data_pending = dr;
3955
3956 mutex_exit(&db->db_mtx);
3957
b128c09f 3958 dbuf_write(dr, *datap, tx);
34dc7c2f
BB
3959
3960 ASSERT(!list_link_active(&dr->dr_dirty_node));
572e2857 3961 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
3fa93bb8 3962 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
572e2857
BB
3963 DB_DNODE_EXIT(db);
3964 } else {
3965 /*
3966 * Although zio_nowait() does not "wait for an IO", it does
3967 * initiate the IO. If this is an empty write it seems plausible
3968 * that the IO could actually be completed before the nowait
3969 * returns. We need to DB_DNODE_EXIT() first in case
3970 * zio_nowait() invalidates the dbuf.
3971 */
3972 DB_DNODE_EXIT(db);
34dc7c2f 3973 zio_nowait(dr->dr_zio);
572e2857 3974 }
34dc7c2f
BB
3975}
3976
3977void
4bda3bd0 3978dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
34dc7c2f
BB
3979{
3980 dbuf_dirty_record_t *dr;
3981
c65aa5b2 3982 while ((dr = list_head(list))) {
34dc7c2f
BB
3983 if (dr->dr_zio != NULL) {
3984 /*
3985 * If we find an already initialized zio then we
3986 * are processing the meta-dnode, and we have finished.
3987 * The dbufs for all dnodes are put back on the list
3988 * during processing, so that we can zio_wait()
3989 * these IOs after initiating all child IOs.
3990 */
3991 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
3992 DMU_META_DNODE_OBJECT);
3993 break;
3994 }
4bda3bd0
MA
3995 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
3996 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
3997 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
3998 }
34dc7c2f
BB
3999 list_remove(list, dr);
4000 if (dr->dr_dbuf->db_level > 0)
4001 dbuf_sync_indirect(dr, tx);
4002 else
4003 dbuf_sync_leaf(dr, tx);
4004 }
4005}
4006
34dc7c2f
BB
4007/* ARGSUSED */
4008static void
4009dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4010{
4011 dmu_buf_impl_t *db = vdb;
572e2857 4012 dnode_t *dn;
b128c09f 4013 blkptr_t *bp = zio->io_bp;
34dc7c2f 4014 blkptr_t *bp_orig = &zio->io_bp_orig;
428870ff
BB
4015 spa_t *spa = zio->io_spa;
4016 int64_t delta;
34dc7c2f 4017 uint64_t fill = 0;
428870ff 4018 int i;
34dc7c2f 4019
463a8cfe
AR
4020 ASSERT3P(db->db_blkptr, !=, NULL);
4021 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
b128c09f 4022
572e2857
BB
4023 DB_DNODE_ENTER(db);
4024 dn = DB_DNODE(db);
428870ff
BB
4025 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4026 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4027 zio->io_prev_space_delta = delta;
34dc7c2f 4028
b0bc7a84
MG
4029 if (bp->blk_birth != 0) {
4030 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4031 BP_GET_TYPE(bp) == dn->dn_type) ||
4032 (db->db_blkid == DMU_SPILL_BLKID &&
9b67f605
MA
4033 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4034 BP_IS_EMBEDDED(bp));
b0bc7a84 4035 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
34dc7c2f
BB
4036 }
4037
4038 mutex_enter(&db->db_mtx);
4039
428870ff
BB
4040#ifdef ZFS_DEBUG
4041 if (db->db_blkid == DMU_SPILL_BLKID) {
428870ff 4042 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
463a8cfe 4043 ASSERT(!(BP_IS_HOLE(bp)) &&
50c957f7 4044 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
428870ff
BB
4045 }
4046#endif
4047
34dc7c2f
BB
4048 if (db->db_level == 0) {
4049 mutex_enter(&dn->dn_mtx);
428870ff 4050 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
69830602
TC
4051 db->db_blkid != DMU_SPILL_BLKID) {
4052 ASSERT0(db->db_objset->os_raw_receive);
34dc7c2f 4053 dn->dn_phys->dn_maxblkid = db->db_blkid;
69830602 4054 }
34dc7c2f
BB
4055 mutex_exit(&dn->dn_mtx);
4056
4057 if (dn->dn_type == DMU_OT_DNODE) {
50c957f7
NB
4058 i = 0;
4059 while (i < db->db.db_size) {
817b1b6e
MA
4060 dnode_phys_t *dnp =
4061 (void *)(((char *)db->db.db_data) + i);
50c957f7
NB
4062
4063 i += DNODE_MIN_SIZE;
4064 if (dnp->dn_type != DMU_OT_NONE) {
34dc7c2f 4065 fill++;
50c957f7
NB
4066 i += dnp->dn_extra_slots *
4067 DNODE_MIN_SIZE;
4068 }
34dc7c2f
BB
4069 }
4070 } else {
b0bc7a84
MG
4071 if (BP_IS_HOLE(bp)) {
4072 fill = 0;
4073 } else {
4074 fill = 1;
4075 }
34dc7c2f
BB
4076 }
4077 } else {
b128c09f 4078 blkptr_t *ibp = db->db.db_data;
34dc7c2f 4079 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
b128c09f
BB
4080 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4081 if (BP_IS_HOLE(ibp))
34dc7c2f 4082 continue;
9b67f605 4083 fill += BP_GET_FILL(ibp);
34dc7c2f
BB
4084 }
4085 }
572e2857 4086 DB_DNODE_EXIT(db);
34dc7c2f 4087
9b67f605 4088 if (!BP_IS_EMBEDDED(bp))
b5256303 4089 BP_SET_FILL(bp, fill);
34dc7c2f
BB
4090
4091 mutex_exit(&db->db_mtx);
463a8cfe
AR
4092
4093 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
4094 *db->db_blkptr = *bp;
4095 rw_exit(&dn->dn_struct_rwlock);
34dc7c2f
BB
4096}
4097
bc77ba73
PD
4098/* ARGSUSED */
4099/*
4100 * This function gets called just prior to running through the compression
4101 * stage of the zio pipeline. If we're an indirect block comprised of only
4102 * holes, then we want this indirect to be compressed away to a hole. In
4103 * order to do that we must zero out any information about the holes that
4105 * this indirect points to before we try to compress it.
4105 */
4106static void
4107dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4108{
4109 dmu_buf_impl_t *db = vdb;
4110 dnode_t *dn;
4111 blkptr_t *bp;
721ed0ee 4112 unsigned int epbs, i;
bc77ba73
PD
4113
4114 ASSERT3U(db->db_level, >, 0);
4115 DB_DNODE_ENTER(db);
4116 dn = DB_DNODE(db);
4117 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
721ed0ee 4118 ASSERT3U(epbs, <, 31);
bc77ba73
PD
4119
4120 /* Determine if all our children are holes */
3f93077b 4121 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
bc77ba73
PD
4122 if (!BP_IS_HOLE(bp))
4123 break;
4124 }
4125
4126 /*
4127 * If all the children are holes, then zero them all out so that
4128 * we may get compressed away.
4129 */
3f93077b 4130 if (i == 1ULL << epbs) {
721ed0ee
GM
4131 /*
4132 * We only found holes. Grab the rwlock to prevent
4133 * anybody from reading the blocks we're about to
4134 * zero out.
4135 */
4136 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
bc77ba73 4137 bzero(db->db.db_data, db->db.db_size);
721ed0ee 4138 rw_exit(&dn->dn_struct_rwlock);
bc77ba73
PD
4139 }
4140 DB_DNODE_EXIT(db);
4141}
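/*
 * Worked example (sketch): with epbs = 10 the loop above inspects all
 * 1 << 10 = 1024 child block pointers. Only when every one is a hole
 * is the buffer zeroed; an all-zero indirect block then compresses
 * away completely and is recorded as a hole instead of being written.
 */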
4142
e8b96c60
MA
4143/*
4144 * The SPA will call this callback several times for each zio - once
4145 * for every physical child i/o (zio->io_phys_children times). This
4146 * allows the DMU to monitor the progress of each logical i/o. For example,
4147 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4148 * block. There may be a long delay before all copies/fragments are completed,
4149 * so this callback allows us to retire dirty space gradually, as the physical
4150 * i/os complete.
4151 */
4152/* ARGSUSED */
4153static void
4154dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4155{
4156 dmu_buf_impl_t *db = arg;
4157 objset_t *os = db->db_objset;
4158 dsl_pool_t *dp = dmu_objset_pool(os);
4159 dbuf_dirty_record_t *dr;
4160 int delta = 0;
4161
4162 dr = db->db_data_pending;
4163 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4164
4165 /*
4166 * The callback will be called io_phys_children times. Retire one
4167 * portion of our dirty space each time we are called. Any rounding
4168 * error will be cleaned up by dsl_pool_sync()'s call to
4169 * dsl_pool_undirty_space().
4170 */
4171 delta = dr->dr_accounted / zio->io_phys_children;
4172 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4173}
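/*
 * Worked example (sketch): if dr_accounted is 131072 bytes and the
 * logical i/o has 3 physical children (e.g. copies=3), each callback
 * retires 131072 / 3 = 43690 bytes. The 2 bytes lost to integer
 * division are cleaned up by dsl_pool_undirty_space() from
 * dsl_pool_sync(), as noted above.
 */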
4174
34dc7c2f
BB
4175/* ARGSUSED */
4176static void
4177dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4178{
4179 dmu_buf_impl_t *db = vdb;
428870ff 4180 blkptr_t *bp_orig = &zio->io_bp_orig;
b0bc7a84
MG
4181 blkptr_t *bp = db->db_blkptr;
4182 objset_t *os = db->db_objset;
4183 dmu_tx_t *tx = os->os_synctx;
34dc7c2f
BB
4184 dbuf_dirty_record_t **drp, *dr;
4185
c99c9001 4186 ASSERT0(zio->io_error);
428870ff
BB
4187 ASSERT(db->db_blkptr == bp);
4188
03c6040b
GW
4189 /*
4190 * For nopwrites and rewrites we ensure that the bp matches our
4191 * original and bypass all the accounting.
4192 */
4193 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
428870ff
BB
4194 ASSERT(BP_EQUAL(bp, bp_orig));
4195 } else {
b0bc7a84 4196 dsl_dataset_t *ds = os->os_dsl_dataset;
428870ff
BB
4197 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4198 dsl_dataset_block_born(ds, bp, tx);
4199 }
34dc7c2f
BB
4200
4201 mutex_enter(&db->db_mtx);
4202
428870ff
BB
4203 DBUF_VERIFY(db);
4204
34dc7c2f
BB
4205 drp = &db->db_last_dirty;
4206 while ((dr = *drp) != db->db_data_pending)
4207 drp = &dr->dr_next;
4208 ASSERT(!list_link_active(&dr->dr_dirty_node));
428870ff 4209 ASSERT(dr->dr_dbuf == db);
34dc7c2f
BB
4210 ASSERT(dr->dr_next == NULL);
4211 *drp = dr->dr_next;
4212
428870ff
BB
4213#ifdef ZFS_DEBUG
4214 if (db->db_blkid == DMU_SPILL_BLKID) {
572e2857
BB
4215 dnode_t *dn;
4216
4217 DB_DNODE_ENTER(db);
4218 dn = DB_DNODE(db);
428870ff
BB
4219 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4220 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
50c957f7 4221 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
572e2857 4222 DB_DNODE_EXIT(db);
428870ff
BB
4223 }
4224#endif
4225
34dc7c2f 4226 if (db->db_level == 0) {
428870ff 4227 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
34dc7c2f 4228 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
b128c09f
BB
4229 if (db->db_state != DB_NOFILL) {
4230 if (dr->dt.dl.dr_data != db->db_buf)
d3c2ae1c 4231 arc_buf_destroy(dr->dt.dl.dr_data, db);
b128c09f 4232 }
34dc7c2f 4233 } else {
572e2857
BB
4234 dnode_t *dn;
4235
4236 DB_DNODE_ENTER(db);
4237 dn = DB_DNODE(db);
34dc7c2f 4238 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
b0bc7a84 4239 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
34dc7c2f 4240 if (!BP_IS_HOLE(db->db_blkptr)) {
1fde1e37
BB
4241 ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
4242 SPA_BLKPTRSHIFT);
b0bc7a84
MG
4243 ASSERT3U(db->db_blkid, <=,
4244 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
34dc7c2f
BB
4245 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4246 db->db.db_size);
34dc7c2f 4247 }
572e2857 4248 DB_DNODE_EXIT(db);
34dc7c2f
BB
4249 mutex_destroy(&dr->dt.di.dr_mtx);
4250 list_destroy(&dr->dt.di.dr_children);
4251 }
4252 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4253
4254 cv_broadcast(&db->db_changed);
4255 ASSERT(db->db_dirtycnt > 0);
4256 db->db_dirtycnt -= 1;
4257 db->db_data_pending = NULL;
3d503a76 4258 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
428870ff
BB
4259}
4260
4261static void
4262dbuf_write_nofill_ready(zio_t *zio)
4263{
4264 dbuf_write_ready(zio, NULL, zio->io_private);
4265}
4266
4267static void
4268dbuf_write_nofill_done(zio_t *zio)
4269{
4270 dbuf_write_done(zio, NULL, zio->io_private);
4271}
4272
4273static void
4274dbuf_write_override_ready(zio_t *zio)
4275{
4276 dbuf_dirty_record_t *dr = zio->io_private;
4277 dmu_buf_impl_t *db = dr->dr_dbuf;
4278
4279 dbuf_write_ready(zio, NULL, db);
4280}
4281
4282static void
4283dbuf_write_override_done(zio_t *zio)
4284{
4285 dbuf_dirty_record_t *dr = zio->io_private;
4286 dmu_buf_impl_t *db = dr->dr_dbuf;
4287 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4288
4289 mutex_enter(&db->db_mtx);
4290 if (!BP_EQUAL(zio->io_bp, obp)) {
4291 if (!BP_IS_HOLE(obp))
4292 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4293 arc_release(dr->dt.dl.dr_data, db);
4294 }
34dc7c2f
BB
4295 mutex_exit(&db->db_mtx);
4296
428870ff 4297 dbuf_write_done(zio, NULL, db);
a6255b7f
DQ
4298
4299 if (zio->io_abd != NULL)
4300 abd_put(zio->io_abd);
428870ff
BB
4301}
4302
a1d477c2
MA
4303typedef struct dbuf_remap_impl_callback_arg {
4304 objset_t *drica_os;
4305 uint64_t drica_blk_birth;
4306 dmu_tx_t *drica_tx;
4307} dbuf_remap_impl_callback_arg_t;
4308
4309static void
4310dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4311 void *arg)
4312{
4313 dbuf_remap_impl_callback_arg_t *drica = arg;
4314 objset_t *os = drica->drica_os;
4315 spa_t *spa = dmu_objset_spa(os);
4316 dmu_tx_t *tx = drica->drica_tx;
4317
4318 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4319
4320 if (os == spa_meta_objset(spa)) {
4321 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4322 } else {
4323 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4324 size, drica->drica_blk_birth, tx);
4325 }
4326}
4327
4328static void
4329dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
4330{
4331 blkptr_t bp_copy = *bp;
4332 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4333 dbuf_remap_impl_callback_arg_t drica;
4334
4335 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4336
4337 drica.drica_os = dn->dn_objset;
4338 drica.drica_blk_birth = bp->blk_birth;
4339 drica.drica_tx = tx;
4340 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4341 &drica)) {
4342 /*
4343 * The struct_rwlock prevents dbuf_read_impl() from
4344 * dereferencing the BP while we are changing it. To
4345 * avoid lock contention, only grab it when we are actually
4346 * changing the BP.
4347 */
4348 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
4349 *bp = bp_copy;
4350 rw_exit(&dn->dn_struct_rwlock);
4351 }
4352}
4353
4354/*
4355 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
4356 * to remap a copy of every bp in the dbuf.
4357 */
4358boolean_t
4359dbuf_can_remap(const dmu_buf_impl_t *db)
4360{
4361 spa_t *spa = dmu_objset_spa(db->db_objset);
4362 blkptr_t *bp = db->db.db_data;
4363 boolean_t ret = B_FALSE;
4364
4365 ASSERT3U(db->db_level, >, 0);
4366 ASSERT3S(db->db_state, ==, DB_CACHED);
4367
4368 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
4369
4370 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4371 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4372 blkptr_t bp_copy = bp[i];
4373 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
4374 ret = B_TRUE;
4375 break;
4376 }
4377 }
4378 spa_config_exit(spa, SCL_VDEV, FTAG);
4379
4380 return (ret);
4381}
4382
4383boolean_t
4384dnode_needs_remap(const dnode_t *dn)
4385{
4386 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4387 boolean_t ret = B_FALSE;
4388
4389 if (dn->dn_phys->dn_nlevels == 0) {
4390 return (B_FALSE);
4391 }
4392
4393 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
4394
4395 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4396 for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
4397 blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
4398 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
4399 ret = B_TRUE;
4400 break;
4401 }
4402 }
4403 spa_config_exit(spa, SCL_VDEV, FTAG);
4404
4405 return (ret);
4406}
4407
4408/*
4409 * Remap any existing BP's to concrete vdevs, if possible.
4410 */
4411static void
4412dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4413{
4414 spa_t *spa = dmu_objset_spa(db->db_objset);
4415 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4416
4417 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4418 return;
4419
4420 if (db->db_level > 0) {
4421 blkptr_t *bp = db->db.db_data;
4422 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4423 dbuf_remap_impl(dn, &bp[i], tx);
4424 }
4425 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4426 dnode_phys_t *dnp = db->db.db_data;
4427 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4428 DMU_OT_DNODE);
4429 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4430 i += dnp[i].dn_extra_slots + 1) {
4431 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4432 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx);
4433 }
4434 }
4435 }
4436}
4437
4438
e49f1e20 4439/* Issue I/O to commit a dirty buffer to disk. */
428870ff
BB
4440static void
4441dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4442{
4443 dmu_buf_impl_t *db = dr->dr_dbuf;
572e2857
BB
4444 dnode_t *dn;
4445 objset_t *os;
428870ff
BB
4446 dmu_buf_impl_t *parent = db->db_parent;
4447 uint64_t txg = tx->tx_txg;
5dbd68a3 4448 zbookmark_phys_t zb;
428870ff
BB
4449 zio_prop_t zp;
4450 zio_t *zio;
4451 int wp_flag = 0;
34dc7c2f 4452
463a8cfe
AR
4453 ASSERT(dmu_tx_is_syncing(tx));
4454
572e2857
BB
4455 DB_DNODE_ENTER(db);
4456 dn = DB_DNODE(db);
4457 os = dn->dn_objset;
4458
428870ff
BB
4459 if (db->db_state != DB_NOFILL) {
4460 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4461 /*
4462 * Private object buffers are released here rather
4463 * than in dbuf_dirty() since they are only modified
4464 * in the syncing context and we don't want the
4465 * overhead of making multiple copies of the data.
4466 */
4467 if (BP_IS_HOLE(db->db_blkptr)) {
4468 arc_buf_thaw(data);
4469 } else {
4470 dbuf_release_bp(db);
4471 }
a1d477c2 4472 dbuf_remap(dn, db, tx);
428870ff
BB
4473 }
4474 }
4475
4476 if (parent != dn->dn_dbuf) {
e49f1e20
WA
4477 /* Our parent is an indirect block. */
4478 /* We have a dirty parent that has been scheduled for write. */
428870ff 4479 ASSERT(parent && parent->db_data_pending);
e49f1e20 4480 /* Our parent's buffer is one level closer to the dnode. */
428870ff 4481 ASSERT(db->db_level == parent->db_level-1);
e49f1e20
WA
4482 /*
4483 * We're about to modify our parent's db_data by modifying
4484 * our block pointer, so the parent must be released.
4485 */
428870ff
BB
4486 ASSERT(arc_released(parent->db_buf));
4487 zio = parent->db_data_pending->dr_zio;
4488 } else {
e49f1e20 4489 /* Our parent is the dnode itself. */
428870ff
BB
4490 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
4491 db->db_blkid != DMU_SPILL_BLKID) ||
4492 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
4493 if (db->db_blkid != DMU_SPILL_BLKID)
4494 ASSERT3P(db->db_blkptr, ==,
4495 &dn->dn_phys->dn_blkptr[db->db_blkid]);
4496 zio = dn->dn_zio;
4497 }
4498
4499 ASSERT(db->db_level == 0 || data == db->db_buf);
4500 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
4501 ASSERT(zio);
4502
4503 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
4504 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
4505 db->db.db_object, db->db_level, db->db_blkid);
4506
4507 if (db->db_blkid == DMU_SPILL_BLKID)
4508 wp_flag = WP_SPILL;
4509 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
4510
82644107 4511 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
572e2857 4512 DB_DNODE_EXIT(db);
428870ff 4513
463a8cfe
AR
4514 /*
4515 * We copy the blkptr now (rather than when we instantiate the dirty
4516 * record), because its value can change between open context and
4517 * syncing context. We do not need to hold dn_struct_rwlock to read
4518 * db_blkptr because we are in syncing context.
4519 */
4520 dr->dr_bp_copy = *db->db_blkptr;
4521
9b67f605
MA
4522 if (db->db_level == 0 &&
4523 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
4524 /*
4525 * The BP for this block has been provided by open context
4526 * (by dmu_sync() or dmu_buf_write_embedded()).
4527 */
a6255b7f
DQ
4528 abd_t *contents = (data != NULL) ?
4529 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
9b67f605 4530
428870ff 4531 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2aa34383
DK
4532 &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size,
4533 &zp, dbuf_write_override_ready, NULL, NULL,
bc77ba73 4534 dbuf_write_override_done,
e8b96c60 4535 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
428870ff
BB
4536 mutex_enter(&db->db_mtx);
4537 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
4538 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
03c6040b 4539 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
428870ff
BB
4540 mutex_exit(&db->db_mtx);
4541 } else if (db->db_state == DB_NOFILL) {
3c67d83a
TH
4542 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
4543 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
428870ff 4544 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2aa34383 4545 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
bc77ba73
PD
4546 dbuf_write_nofill_ready, NULL, NULL,
4547 dbuf_write_nofill_done, db,
428870ff
BB
4548 ZIO_PRIORITY_ASYNC_WRITE,
4549 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
4550 } else {
4551 ASSERT(arc_released(data));
bc77ba73
PD
4552
4553 /*
4554 * For indirect blocks, we want to setup the children
4555 * ready callback so that we can properly handle an indirect
4556 * block that only contains holes.
4557 */
1c27024e 4558 arc_write_done_func_t *children_ready_cb = NULL;
bc77ba73
PD
4559 if (db->db_level != 0)
4560 children_ready_cb = dbuf_write_children_ready;
4561
428870ff 4562 dr->dr_zio = arc_write(zio, os->os_spa, txg,
463a8cfe 4563 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
d3c2ae1c
GW
4564 &zp, dbuf_write_ready,
4565 children_ready_cb, dbuf_write_physdone,
4566 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
4567 ZIO_FLAG_MUSTSUCCEED, &zb);
428870ff 4568 }
34dc7c2f 4569}
c28b2279 4570
93ce2b4c 4571#if defined(_KERNEL)
8f576c23
BB
4572EXPORT_SYMBOL(dbuf_find);
4573EXPORT_SYMBOL(dbuf_is_metadata);
d3c2ae1c 4574EXPORT_SYMBOL(dbuf_destroy);
8f576c23
BB
4575EXPORT_SYMBOL(dbuf_loan_arcbuf);
4576EXPORT_SYMBOL(dbuf_whichblock);
4577EXPORT_SYMBOL(dbuf_read);
4578EXPORT_SYMBOL(dbuf_unoverride);
4579EXPORT_SYMBOL(dbuf_free_range);
4580EXPORT_SYMBOL(dbuf_new_size);
4581EXPORT_SYMBOL(dbuf_release_bp);
4582EXPORT_SYMBOL(dbuf_dirty);
0c03d21a 4583EXPORT_SYMBOL(dmu_buf_set_crypt_params);
c28b2279 4584EXPORT_SYMBOL(dmu_buf_will_dirty);
a73e8fdb 4585EXPORT_SYMBOL(dmu_buf_is_dirty);
8f576c23
BB
4586EXPORT_SYMBOL(dmu_buf_will_not_fill);
4587EXPORT_SYMBOL(dmu_buf_will_fill);
4588EXPORT_SYMBOL(dmu_buf_fill_done);
4047414a 4589EXPORT_SYMBOL(dmu_buf_rele);
8f576c23 4590EXPORT_SYMBOL(dbuf_assign_arcbuf);
8f576c23
BB
4591EXPORT_SYMBOL(dbuf_prefetch);
4592EXPORT_SYMBOL(dbuf_hold_impl);
4593EXPORT_SYMBOL(dbuf_hold);
4594EXPORT_SYMBOL(dbuf_hold_level);
4595EXPORT_SYMBOL(dbuf_create_bonus);
4596EXPORT_SYMBOL(dbuf_spill_set_blksz);
4597EXPORT_SYMBOL(dbuf_rm_spill);
4598EXPORT_SYMBOL(dbuf_add_ref);
4599EXPORT_SYMBOL(dbuf_rele);
4600EXPORT_SYMBOL(dbuf_rele_and_unlock);
4601EXPORT_SYMBOL(dbuf_refcount);
4602EXPORT_SYMBOL(dbuf_sync_list);
4603EXPORT_SYMBOL(dmu_buf_set_user);
4604EXPORT_SYMBOL(dmu_buf_set_user_ie);
8f576c23 4605EXPORT_SYMBOL(dmu_buf_get_user);
0f699108 4606EXPORT_SYMBOL(dmu_buf_get_blkptr);
d3c2ae1c 4607
02730c33 4608/* BEGIN CSTYLED */
d3c2ae1c
GW
4609module_param(dbuf_cache_max_bytes, ulong, 0644);
4610MODULE_PARM_DESC(dbuf_cache_max_bytes,
02730c33 4611 "Maximum size in bytes of the dbuf cache.");
d3c2ae1c
GW
4612
4613module_param(dbuf_cache_hiwater_pct, uint, 0644);
4614MODULE_PARM_DESC(dbuf_cache_hiwater_pct,
f974e25d 4615 "Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
4616 "directly.");
d3c2ae1c
GW
4617
4618module_param(dbuf_cache_lowater_pct, uint, 0644);
4619MODULE_PARM_DESC(dbuf_cache_lowater_pct,
f974e25d 4620 "Percentage below dbuf_cache_max_bytes when the evict thread stops "
4621 "evicting dbufs.");
d3c2ae1c 4622
2e5dc449
MA
4623module_param(dbuf_metadata_cache_max_bytes, ulong, 0644);
4624MODULE_PARM_DESC(dbuf_metadata_cache_max_bytes,
4625 "Maximum size in bytes of the dbuf metadata cache.");
4626
de4f8d5d
BB
4627module_param(dbuf_cache_shift, int, 0644);
4628MODULE_PARM_DESC(dbuf_cache_shift,
4629 "Set the size of the dbuf cache to a log2 fraction of arc size.");
2e5dc449
MA
4630
4631module_param(dbuf_metadata_cache_shift, int, 0644);
4632 MODULE_PARM_DESC(dbuf_metadata_cache_shift,
4633 "Set the size of the dbuf metadata cache to a log2 fraction of "
4634 "arc size.");
02730c33 4635/* END CSTYLED */
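/*
 * Usage note (sketch): on Linux these parameters surface under
 * /sys/module/zfs/parameters/, and the 0644 mode makes them tunable at
 * runtime, e.g. (as root):
 *
 *	echo $((1 << 30)) > /sys/module/zfs/parameters/dbuf_cache_max_bytes
 */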
c28b2279 4636#endif