module/zfs/dbuf.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 */
28
29 #include <sys/zfs_context.h>
30 #include <sys/arc.h>
31 #include <sys/dmu.h>
32 #include <sys/dmu_send.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dbuf.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/spa.h>
40 #include <sys/zio.h>
41 #include <sys/dmu_zfetch.h>
42 #include <sys/sa.h>
43 #include <sys/sa_impl.h>
44 #include <sys/zfeature.h>
45 #include <sys/blkptr.h>
46 #include <sys/range_tree.h>
47 #include <sys/trace_defs.h>
48 #include <sys/callb.h>
49 #include <sys/abd.h>
50 #include <sys/vdev.h>
51 #include <sys/cityhash.h>
52 #include <sys/spa_impl.h>
53
54 kstat_t *dbuf_ksp;
55
56 typedef struct dbuf_stats {
57 /*
58 * Various statistics about the size of the dbuf cache.
59 */
60 kstat_named_t cache_count;
61 kstat_named_t cache_size_bytes;
62 kstat_named_t cache_size_bytes_max;
63 /*
64 * Statistics regarding the bounds on the dbuf cache size.
65 */
66 kstat_named_t cache_target_bytes;
67 kstat_named_t cache_lowater_bytes;
68 kstat_named_t cache_hiwater_bytes;
69 /*
70 * Total number of dbuf cache evictions that have occurred.
71 */
72 kstat_named_t cache_total_evicts;
73 /*
74 * The distribution of dbuf levels in the dbuf cache and
75 * the total size of all dbufs at each level.
76 */
77 kstat_named_t cache_levels[DN_MAX_LEVELS];
78 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
79 /*
80 * Statistics about the dbuf hash table.
81 */
82 kstat_named_t hash_hits;
83 kstat_named_t hash_misses;
84 kstat_named_t hash_collisions;
85 kstat_named_t hash_elements;
86 kstat_named_t hash_elements_max;
87 /*
88 * Number of sublists containing more than one dbuf in the dbuf
89 * hash table. Keep track of the longest hash chain.
90 */
91 kstat_named_t hash_chains;
92 kstat_named_t hash_chain_max;
93 /*
94 * Number of times a dbuf_create() discovers that a dbuf was
95 * already created and in the dbuf hash table.
96 */
97 kstat_named_t hash_insert_race;
98 /*
99 * Statistics about the size of the metadata dbuf cache.
100 */
101 kstat_named_t metadata_cache_count;
102 kstat_named_t metadata_cache_size_bytes;
103 kstat_named_t metadata_cache_size_bytes_max;
104 /*
105 * For diagnostic purposes, this is incremented whenever we can't add
106 * something to the metadata cache because it's full, and instead put
107 * the data in the regular dbuf cache.
108 */
109 kstat_named_t metadata_cache_overflow;
110 } dbuf_stats_t;
111
112 dbuf_stats_t dbuf_stats = {
113 { "cache_count", KSTAT_DATA_UINT64 },
114 { "cache_size_bytes", KSTAT_DATA_UINT64 },
115 { "cache_size_bytes_max", KSTAT_DATA_UINT64 },
116 { "cache_target_bytes", KSTAT_DATA_UINT64 },
117 { "cache_lowater_bytes", KSTAT_DATA_UINT64 },
118 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
119 { "cache_total_evicts", KSTAT_DATA_UINT64 },
120 { { "cache_levels_N", KSTAT_DATA_UINT64 } },
121 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
122 { "hash_hits", KSTAT_DATA_UINT64 },
123 { "hash_misses", KSTAT_DATA_UINT64 },
124 { "hash_collisions", KSTAT_DATA_UINT64 },
125 { "hash_elements", KSTAT_DATA_UINT64 },
126 { "hash_elements_max", KSTAT_DATA_UINT64 },
127 { "hash_chains", KSTAT_DATA_UINT64 },
128 { "hash_chain_max", KSTAT_DATA_UINT64 },
129 { "hash_insert_race", KSTAT_DATA_UINT64 },
130 { "metadata_cache_count", KSTAT_DATA_UINT64 },
131 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
132 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
133 { "metadata_cache_overflow", KSTAT_DATA_UINT64 }
134 };
135
136 #define DBUF_STAT_INCR(stat, val) \
137 atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
138 #define DBUF_STAT_DECR(stat, val) \
139 DBUF_STAT_INCR(stat, -(val));
140 #define DBUF_STAT_BUMP(stat) \
141 DBUF_STAT_INCR(stat, 1);
142 #define DBUF_STAT_BUMPDOWN(stat) \
143 DBUF_STAT_INCR(stat, -1);
144 #define DBUF_STAT_MAX(stat, v) { \
145 uint64_t _m; \
146 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
147 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
148 continue; \
149 }
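/*
 * DBUF_STAT_MAX() above is a lock-free "store if greater": it re-reads the
 * current value and retries the compare-and-swap until either the stored
 * value is already >= v or the swap succeeds. For example,
 * DBUF_STAT_MAX(hash_chain_max, i) in dbuf_hash_insert() only ever raises
 * hash_chain_max.
 */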
150
151 typedef struct dbuf_hold_arg {
152 /* Function arguments */
153 dnode_t *dh_dn;
154 uint8_t dh_level;
155 uint64_t dh_blkid;
156 boolean_t dh_fail_sparse;
157 boolean_t dh_fail_uncached;
158 void *dh_tag;
159 dmu_buf_impl_t **dh_dbp;
160 /* Local variables */
161 dmu_buf_impl_t *dh_db;
162 dmu_buf_impl_t *dh_parent;
163 blkptr_t *dh_bp;
164 int dh_err;
165 dbuf_dirty_record_t *dh_dr;
166 } dbuf_hold_arg_t;
167
168 static dbuf_hold_arg_t *dbuf_hold_arg_create(dnode_t *dn, uint8_t level,
169 uint64_t blkid, boolean_t fail_sparse, boolean_t fail_uncached,
170 void *tag, dmu_buf_impl_t **dbp);
171 static int dbuf_hold_impl_arg(dbuf_hold_arg_t *dh);
172 static void dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh);
173
174 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
175 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
176
177 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
178 dmu_buf_evict_func_t *evict_func_sync,
179 dmu_buf_evict_func_t *evict_func_async,
180 dmu_buf_t **clear_on_evict_dbufp);
181
182 /*
183 * Global data structures and functions for the dbuf cache.
184 */
185 static kmem_cache_t *dbuf_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
187
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
192
193 /*
194 * There are two dbuf caches; each dbuf can only be in one of them at a time.
195 *
196 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198 * that represent the metadata that describes filesystems/snapshots/
199 * bookmarks/properties/etc. We only evict from this cache when we export a
200 * pool, to short-circuit as much I/O as possible for all administrative
201 * commands that need the metadata. There is no eviction policy for this
202 * cache, because we try to only include types in it which would occupy a
203 * very small amount of space per object but create a large impact on the
204 * performance of these commands. Instead, after it reaches a maximum size
205 * (which should only happen on very small memory systems with a very large
206 * number of filesystem objects), we stop taking new dbufs into the
207 * metadata cache, instead putting them in the normal dbuf cache.
208 *
209 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210 * are not currently held but have been recently released. These dbufs
211 * are not eligible for arc eviction until they are aged out of the cache.
212 * Dbufs that are aged out of the cache will be immediately destroyed and
213 * become eligible for arc eviction.
214 *
215 * Dbufs are added to these caches once the last hold is released. If a dbuf is
216 * later accessed and still exists in the dbuf cache, then it will be removed
217 * from the cache and later re-added to the head of the cache.
218 *
219 * If a given dbuf meets the requirements for the metadata cache, it will go
220 * there, otherwise it will be considered for the generic LRU dbuf cache. The
221 * caches and the refcounts tracking their sizes are stored in an array indexed
222 * by those caches' matching enum values (from dbuf_cached_state_t).
223 */
224 typedef struct dbuf_cache {
225 multilist_t *cache;
226 zfs_refcount_t size;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229
230 /* Size limits for the caches */
231 unsigned long dbuf_cache_max_bytes = 0;
232 unsigned long dbuf_metadata_cache_max_bytes = 0;
233 /* Set the default sizes of the caches to a log2 fraction of the arc size */
234 int dbuf_cache_shift = 5;
235 int dbuf_metadata_cache_shift = 6;
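/*
 * As an example of the defaults above: with an ARC target size of 8 GiB,
 * dbuf_init() caps the dbuf cache at 8 GiB >> 5 = 256 MiB and the metadata
 * cache at 8 GiB >> 6 = 128 MiB (see also dbuf_cache_target_bytes()).
 */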
236
237 /*
238 * The LRU dbuf cache uses a three-stage eviction policy:
239 * - A low water marker designates when the dbuf eviction thread
240 * should stop evicting from the dbuf cache.
241 * - When we reach the maximum size (aka mid water mark), we
242 * signal the eviction thread to run.
243 * - The high water mark indicates when the eviction thread
244 * is unable to keep up with the incoming load and eviction must
245 * happen in the context of the calling thread.
246 *
247 * The dbuf cache:
248 * (max size)
249 * low water mid water hi water
250 * +----------------------------------------+----------+----------+
251 * | | | |
252 * | | | |
253 * | | | |
254 * | | | |
255 * +----------------------------------------+----------+----------+
256 * stop signal evict
257 * evicting eviction directly
258 * thread
259 *
260 * The high and low water marks indicate the operating range for the eviction
261 * thread. The low water mark is, by default, 90% of the total size of the
262 * cache and the high water mark is at 110% (both of these percentages can be
263 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
264 * respectively). The eviction thread will try to ensure that the cache remains
265 * within this range by waking up every second and checking if the cache is
266 * above the low water mark. The thread can also be woken up by callers adding
267 * elements into the cache if the cache is larger than the mid water (i.e max
268 * cache size). Once the eviction thread is woken up and eviction is required,
269 * it will continue evicting buffers until it's able to reduce the cache size
270 * to the low water mark. If the cache size continues to grow and hits the high
271 * water mark, then callers adding elements to the cache will begin to evict
272 * directly from the cache until the cache is no longer above the high water
273 * mark.
274 */
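/*
 * As a concrete illustration, assuming a computed target (mid water) of
 * 100 MiB and the default dbuf_cache_hiwater_pct / dbuf_cache_lowater_pct
 * of 10, dbuf_cache_hiwater_bytes() is 110 MiB and
 * dbuf_cache_lowater_bytes() is 90 MiB: the eviction thread is signalled
 * once the cache exceeds 100 MiB, it evicts down to 90 MiB, and callers
 * only evict inline while the cache is above 110 MiB.
 */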
275
276 /*
277 * The percentage above and below the maximum cache size.
278 */
279 uint_t dbuf_cache_hiwater_pct = 10;
280 uint_t dbuf_cache_lowater_pct = 10;
281
282 /* ARGSUSED */
283 static int
284 dbuf_cons(void *vdb, void *unused, int kmflag)
285 {
286 dmu_buf_impl_t *db = vdb;
287 bzero(db, sizeof (dmu_buf_impl_t));
288
289 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
290 rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
291 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
292 multilist_link_init(&db->db_cache_link);
293 zfs_refcount_create(&db->db_holds);
294
295 return (0);
296 }
297
298 /* ARGSUSED */
299 static void
300 dbuf_dest(void *vdb, void *unused)
301 {
302 dmu_buf_impl_t *db = vdb;
303 mutex_destroy(&db->db_mtx);
304 rw_destroy(&db->db_rwlock);
305 cv_destroy(&db->db_changed);
306 ASSERT(!multilist_link_active(&db->db_cache_link));
307 zfs_refcount_destroy(&db->db_holds);
308 }
309
310 /*
311 * dbuf hash table routines
312 */
313 static dbuf_hash_table_t dbuf_hash_table;
314
315 static uint64_t dbuf_hash_count;
316
317 /*
318 * We use Cityhash for this. It's fast, and has good hash properties without
319 * requiring any large static buffers.
320 */
321 static uint64_t
322 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
323 {
324 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
325 }
326
327 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
328 ((dbuf)->db.db_object == (obj) && \
329 (dbuf)->db_objset == (os) && \
330 (dbuf)->db_level == (level) && \
331 (dbuf)->db_blkid == (blkid))
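/*
 * dbuf_find() and dbuf_hash_insert() below map the 64-bit hash to a bucket
 * with idx = hv & h->hash_table_mask; the table size is a power of two, so
 * the mask simply keeps the low-order bits. The bucket's db_hash_next chain
 * is then walked, comparing entries with DBUF_EQUAL() above.
 */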
332
333 dmu_buf_impl_t *
334 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
335 {
336 dbuf_hash_table_t *h = &dbuf_hash_table;
337 uint64_t hv;
338 uint64_t idx;
339 dmu_buf_impl_t *db;
340
341 hv = dbuf_hash(os, obj, level, blkid);
342 idx = hv & h->hash_table_mask;
343
344 mutex_enter(DBUF_HASH_MUTEX(h, idx));
345 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
346 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
347 mutex_enter(&db->db_mtx);
348 if (db->db_state != DB_EVICTING) {
349 mutex_exit(DBUF_HASH_MUTEX(h, idx));
350 return (db);
351 }
352 mutex_exit(&db->db_mtx);
353 }
354 }
355 mutex_exit(DBUF_HASH_MUTEX(h, idx));
356 return (NULL);
357 }
358
359 static dmu_buf_impl_t *
360 dbuf_find_bonus(objset_t *os, uint64_t object)
361 {
362 dnode_t *dn;
363 dmu_buf_impl_t *db = NULL;
364
365 if (dnode_hold(os, object, FTAG, &dn) == 0) {
366 rw_enter(&dn->dn_struct_rwlock, RW_READER);
367 if (dn->dn_bonus != NULL) {
368 db = dn->dn_bonus;
369 mutex_enter(&db->db_mtx);
370 }
371 rw_exit(&dn->dn_struct_rwlock);
372 dnode_rele(dn, FTAG);
373 }
374 return (db);
375 }
376
377 /*
378 * Insert an entry into the hash table. If there is already an element
379 * equal to elem in the hash table, then the already existing element
380 * will be returned and the new element will not be inserted.
381 * Otherwise returns NULL.
382 */
383 static dmu_buf_impl_t *
384 dbuf_hash_insert(dmu_buf_impl_t *db)
385 {
386 dbuf_hash_table_t *h = &dbuf_hash_table;
387 objset_t *os = db->db_objset;
388 uint64_t obj = db->db.db_object;
389 int level = db->db_level;
390 uint64_t blkid, hv, idx;
391 dmu_buf_impl_t *dbf;
392 uint32_t i;
393
394 blkid = db->db_blkid;
395 hv = dbuf_hash(os, obj, level, blkid);
396 idx = hv & h->hash_table_mask;
397
398 mutex_enter(DBUF_HASH_MUTEX(h, idx));
399 for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
400 dbf = dbf->db_hash_next, i++) {
401 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
402 mutex_enter(&dbf->db_mtx);
403 if (dbf->db_state != DB_EVICTING) {
404 mutex_exit(DBUF_HASH_MUTEX(h, idx));
405 return (dbf);
406 }
407 mutex_exit(&dbf->db_mtx);
408 }
409 }
410
411 if (i > 0) {
412 DBUF_STAT_BUMP(hash_collisions);
413 if (i == 1)
414 DBUF_STAT_BUMP(hash_chains);
415
416 DBUF_STAT_MAX(hash_chain_max, i);
417 }
418
419 mutex_enter(&db->db_mtx);
420 db->db_hash_next = h->hash_table[idx];
421 h->hash_table[idx] = db;
422 mutex_exit(DBUF_HASH_MUTEX(h, idx));
423 atomic_inc_64(&dbuf_hash_count);
424 DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);
425
426 return (NULL);
427 }
428
429 /*
430 * This returns whether this dbuf should be stored in the metadata cache, which
431 * is based on whether it's from one of the dnode types that store data related
432 * to traversing dataset hierarchies.
433 */
434 static boolean_t
435 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
436 {
437 DB_DNODE_ENTER(db);
438 dmu_object_type_t type = DB_DNODE(db)->dn_type;
439 DB_DNODE_EXIT(db);
440
441 /* Check if this dbuf is one of the types we care about */
442 if (DMU_OT_IS_METADATA_CACHED(type)) {
443 /* If we hit this, then we set something up wrong in dmu_ot */
444 ASSERT(DMU_OT_IS_METADATA(type));
445
446 /*
447 * Sanity check for small-memory systems: don't allocate too
448 * much memory for this purpose.
449 */
450 if (zfs_refcount_count(
451 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
452 dbuf_metadata_cache_max_bytes) {
453 DBUF_STAT_BUMP(metadata_cache_overflow);
454 return (B_FALSE);
455 }
456
457 return (B_TRUE);
458 }
459
460 return (B_FALSE);
461 }
462
463 /*
464 * Remove an entry from the hash table. It must be in the EVICTING state.
465 */
466 static void
467 dbuf_hash_remove(dmu_buf_impl_t *db)
468 {
469 dbuf_hash_table_t *h = &dbuf_hash_table;
470 uint64_t hv, idx;
471 dmu_buf_impl_t *dbf, **dbp;
472
473 hv = dbuf_hash(db->db_objset, db->db.db_object,
474 db->db_level, db->db_blkid);
475 idx = hv & h->hash_table_mask;
476
477 /*
478 * We mustn't hold db_mtx to maintain lock ordering:
479 * DBUF_HASH_MUTEX > db_mtx.
480 */
481 ASSERT(zfs_refcount_is_zero(&db->db_holds));
482 ASSERT(db->db_state == DB_EVICTING);
483 ASSERT(!MUTEX_HELD(&db->db_mtx));
484
485 mutex_enter(DBUF_HASH_MUTEX(h, idx));
486 dbp = &h->hash_table[idx];
487 while ((dbf = *dbp) != db) {
488 dbp = &dbf->db_hash_next;
489 ASSERT(dbf != NULL);
490 }
491 *dbp = db->db_hash_next;
492 db->db_hash_next = NULL;
493 if (h->hash_table[idx] &&
494 h->hash_table[idx]->db_hash_next == NULL)
495 DBUF_STAT_BUMPDOWN(hash_chains);
496 mutex_exit(DBUF_HASH_MUTEX(h, idx));
497 atomic_dec_64(&dbuf_hash_count);
498 }
499
500 typedef enum {
501 DBVU_EVICTING,
502 DBVU_NOT_EVICTING
503 } dbvu_verify_type_t;
504
505 static void
506 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
507 {
508 #ifdef ZFS_DEBUG
509 int64_t holds;
510
511 if (db->db_user == NULL)
512 return;
513
514 /* Only data blocks support the attachment of user data. */
515 ASSERT(db->db_level == 0);
516
517 /* Clients must resolve a dbuf before attaching user data. */
518 ASSERT(db->db.db_data != NULL);
519 ASSERT3U(db->db_state, ==, DB_CACHED);
520
521 holds = zfs_refcount_count(&db->db_holds);
522 if (verify_type == DBVU_EVICTING) {
523 /*
524 * Immediate eviction occurs when holds == dirtycnt.
525 * For normal eviction buffers, holds is zero on
526 * eviction, except when dbuf_fix_old_data() calls
527 * dbuf_clear_data(). However, the hold count can grow
528 * during eviction even though db_mtx is held (see
529 * dmu_bonus_hold() for an example), so we can only
530 * test the generic invariant that holds >= dirtycnt.
531 */
532 ASSERT3U(holds, >=, db->db_dirtycnt);
533 } else {
534 if (db->db_user_immediate_evict == TRUE)
535 ASSERT3U(holds, >=, db->db_dirtycnt);
536 else
537 ASSERT3U(holds, >, 0);
538 }
539 #endif
540 }
541
542 static void
543 dbuf_evict_user(dmu_buf_impl_t *db)
544 {
545 dmu_buf_user_t *dbu = db->db_user;
546
547 ASSERT(MUTEX_HELD(&db->db_mtx));
548
549 if (dbu == NULL)
550 return;
551
552 dbuf_verify_user(db, DBVU_EVICTING);
553 db->db_user = NULL;
554
555 #ifdef ZFS_DEBUG
556 if (dbu->dbu_clear_on_evict_dbufp != NULL)
557 *dbu->dbu_clear_on_evict_dbufp = NULL;
558 #endif
559
560 /*
561 * There are two eviction callbacks - one that we call synchronously
562 * and one that we invoke via a taskq. The async one is useful for
563 * avoiding lock order reversals and limiting stack depth.
564 *
565 * Note that if we have a sync callback but no async callback,
566 * it's likely that the sync callback will free the structure
567 * containing the dbu. In that case we need to take care to not
568 * dereference dbu after calling the sync evict func.
569 */
570 boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
571
572 if (dbu->dbu_evict_func_sync != NULL)
573 dbu->dbu_evict_func_sync(dbu);
574
575 if (has_async) {
576 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
577 dbu, 0, &dbu->dbu_tqent);
578 }
579 }
580
581 boolean_t
582 dbuf_is_metadata(dmu_buf_impl_t *db)
583 {
584 /*
585  	 * Consider indirect blocks and spill blocks to be metadata.
586 */
587 if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
588 return (B_TRUE);
589 } else {
590 boolean_t is_metadata;
591
592 DB_DNODE_ENTER(db);
593 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
594 DB_DNODE_EXIT(db);
595
596 return (is_metadata);
597 }
598 }
599
600
601 /*
602 * This function *must* return indices evenly distributed between all
603 * sublists of the multilist. This is needed due to how the dbuf eviction
604 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
605 * distributed between all sublists and uses this assumption when
606 * deciding which sublist to evict from and how much to evict from it.
607 */
608 unsigned int
609 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
610 {
611 dmu_buf_impl_t *db = obj;
612
613 /*
614  	 * The assumption here is that the hash value for a given
615  	 * dmu_buf_impl_t will remain constant throughout its lifetime
616  	 * (i.e. its objset, object, level and blkid fields don't change).
617 * Thus, we don't need to store the dbuf's sublist index
618 * on insertion, as this index can be recalculated on removal.
619 *
620 * Also, the low order bits of the hash value are thought to be
621 * distributed evenly. Otherwise, in the case that the multilist
622  	 * has a power-of-two number of sublists, each sublist's usage
623 * would not be evenly distributed.
624 */
625 return (dbuf_hash(db->db_objset, db->db.db_object,
626 db->db_level, db->db_blkid) %
627 multilist_get_num_sublists(ml));
628 }
629
630 static inline unsigned long
631 dbuf_cache_target_bytes(void)
632 {
633 return MIN(dbuf_cache_max_bytes,
634 arc_target_bytes() >> dbuf_cache_shift);
635 }
636
637 static inline uint64_t
638 dbuf_cache_hiwater_bytes(void)
639 {
640 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
641 return (dbuf_cache_target +
642 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
643 }
644
645 static inline uint64_t
646 dbuf_cache_lowater_bytes(void)
647 {
648 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
649 return (dbuf_cache_target -
650 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
651 }
652
653 static inline boolean_t
654 dbuf_cache_above_hiwater(void)
655 {
656 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
657 dbuf_cache_hiwater_bytes());
658 }
659
660 static inline boolean_t
661 dbuf_cache_above_lowater(void)
662 {
663 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
664 dbuf_cache_lowater_bytes());
665 }
666
667 /*
668 * Evict the oldest eligible dbuf from the dbuf cache.
669 */
670 static void
671 dbuf_evict_one(void)
672 {
673 int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
674 multilist_sublist_t *mls = multilist_sublist_lock(
675 dbuf_caches[DB_DBUF_CACHE].cache, idx);
676
677 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
678
679 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
680 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
681 db = multilist_sublist_prev(mls, db);
682 }
683
684 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
685 multilist_sublist_t *, mls);
686
687 if (db != NULL) {
688 multilist_sublist_remove(mls, db);
689 multilist_sublist_unlock(mls);
690 (void) zfs_refcount_remove_many(
691 &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
692 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
693 DBUF_STAT_BUMPDOWN(cache_count);
694 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
695 db->db.db_size);
696 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
697 db->db_caching_status = DB_NO_CACHE;
698 dbuf_destroy(db);
699 DBUF_STAT_MAX(cache_size_bytes_max,
700 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size));
701 DBUF_STAT_BUMP(cache_total_evicts);
702 } else {
703 multilist_sublist_unlock(mls);
704 }
705 }
706
707 /*
708 * The dbuf evict thread is responsible for aging out dbufs from the
709  * cache. Once the cache has reached its maximum size, dbufs are removed
710 * and destroyed. The eviction thread will continue running until the size
711 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
712 * out of the cache it is destroyed and becomes eligible for arc eviction.
713 */
714 /* ARGSUSED */
715 static void
716 dbuf_evict_thread(void *unused)
717 {
718 callb_cpr_t cpr;
719
720 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
721
722 mutex_enter(&dbuf_evict_lock);
723 while (!dbuf_evict_thread_exit) {
724 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
725 CALLB_CPR_SAFE_BEGIN(&cpr);
726 (void) cv_timedwait_sig_hires(&dbuf_evict_cv,
727 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
728 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
729 }
730 mutex_exit(&dbuf_evict_lock);
731
732 /*
733 * Keep evicting as long as we're above the low water mark
734 * for the cache. We do this without holding the locks to
735 * minimize lock contention.
736 */
737 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
738 dbuf_evict_one();
739 }
740
741 mutex_enter(&dbuf_evict_lock);
742 }
743
744 dbuf_evict_thread_exit = B_FALSE;
745 cv_broadcast(&dbuf_evict_cv);
746 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
747 thread_exit();
748 }
749
750 /*
751 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
752 * If the dbuf cache is at its high water mark, then evict a dbuf from the
753  * dbuf cache using the caller's context.
754 */
755 static void
756 dbuf_evict_notify(void)
757 {
758 /*
759 * We check if we should evict without holding the dbuf_evict_lock,
760 * because it's OK to occasionally make the wrong decision here,
761 * and grabbing the lock results in massive lock contention.
762 */
763 if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
764 dbuf_cache_target_bytes()) {
765 if (dbuf_cache_above_hiwater())
766 dbuf_evict_one();
767 cv_signal(&dbuf_evict_cv);
768 }
769 }
770
771 static int
772 dbuf_kstat_update(kstat_t *ksp, int rw)
773 {
774 dbuf_stats_t *ds = ksp->ks_data;
775
776 if (rw == KSTAT_WRITE) {
777 return (SET_ERROR(EACCES));
778 } else {
779 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
780 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
781 ds->cache_size_bytes.value.ui64 =
782 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
783 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
784 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
785 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
786 ds->hash_elements.value.ui64 = dbuf_hash_count;
787 }
788
789 return (0);
790 }
791
792 void
793 dbuf_init(void)
794 {
795 uint64_t hsize = 1ULL << 16;
796 dbuf_hash_table_t *h = &dbuf_hash_table;
797 int i;
798
799 /*
800 * The hash table is big enough to fill all of physical memory
801 * with an average block size of zfs_arc_average_blocksize (default 8K).
802 * By default, the table will take up
803 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
804 */
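/*
 * For example, on a system with 16 GiB of physical memory and the default
 * zfs_arc_average_blocksize of 8 KiB, the loop below doubles hsize from
 * 2^16 up to 2^21 buckets (2^21 * 8 KiB covers 16 GiB), so the table of
 * 8-byte pointers occupies 16 MiB, matching the 1 MB per GB estimate.
 */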
805 while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
806 hsize <<= 1;
807
808 retry:
809 h->hash_table_mask = hsize - 1;
810 #if defined(_KERNEL)
811 /*
812 * Large allocations which do not require contiguous pages
813 * should be using vmem_alloc() in the linux kernel
814 */
815 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
816 #else
817 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
818 #endif
819 if (h->hash_table == NULL) {
820 /* XXX - we should really return an error instead of assert */
821 ASSERT(hsize > (1ULL << 10));
822 hsize >>= 1;
823 goto retry;
824 }
825
826 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
827 sizeof (dmu_buf_impl_t),
828 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
829
830 for (i = 0; i < DBUF_MUTEXES; i++)
831 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
832
833 dbuf_stats_init(h);
834
835 /*
836  	 * Set up the parameters for the dbuf caches. We set the sizes of the
837  	 * dbuf cache and the metadata cache to 1/32nd and 1/64th (default)
838  	 * of the target size of the ARC. If either value has been specified
839  	 * as a module option and is not greater than the target size of the
840  	 * ARC, we honor that value.
841 */
842 if (dbuf_cache_max_bytes == 0 ||
843 dbuf_cache_max_bytes >= arc_target_bytes()) {
844 dbuf_cache_max_bytes = arc_target_bytes() >> dbuf_cache_shift;
845 }
846 if (dbuf_metadata_cache_max_bytes == 0 ||
847 dbuf_metadata_cache_max_bytes >= arc_target_bytes()) {
848 dbuf_metadata_cache_max_bytes =
849 arc_target_bytes() >> dbuf_metadata_cache_shift;
850 }
851
852 /*
853 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
854 * configuration is not required.
855 */
856 dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
857
858 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
859 dbuf_caches[dcs].cache =
860 multilist_create(sizeof (dmu_buf_impl_t),
861 offsetof(dmu_buf_impl_t, db_cache_link),
862 dbuf_cache_multilist_index_func);
863 zfs_refcount_create(&dbuf_caches[dcs].size);
864 }
865
866 dbuf_evict_thread_exit = B_FALSE;
867 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
868 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
869 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
870 NULL, 0, &p0, TS_RUN, minclsyspri);
871
872 dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
873 KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
874 KSTAT_FLAG_VIRTUAL);
875 if (dbuf_ksp != NULL) {
876 dbuf_ksp->ks_data = &dbuf_stats;
877 dbuf_ksp->ks_update = dbuf_kstat_update;
878 kstat_install(dbuf_ksp);
879
880 for (i = 0; i < DN_MAX_LEVELS; i++) {
881 snprintf(dbuf_stats.cache_levels[i].name,
882 KSTAT_STRLEN, "cache_level_%d", i);
883 dbuf_stats.cache_levels[i].data_type =
884 KSTAT_DATA_UINT64;
885 snprintf(dbuf_stats.cache_levels_bytes[i].name,
886 KSTAT_STRLEN, "cache_level_%d_bytes", i);
887 dbuf_stats.cache_levels_bytes[i].data_type =
888 KSTAT_DATA_UINT64;
889 }
890 }
891 }
892
893 void
894 dbuf_fini(void)
895 {
896 dbuf_hash_table_t *h = &dbuf_hash_table;
897 int i;
898
899 dbuf_stats_destroy();
900
901 for (i = 0; i < DBUF_MUTEXES; i++)
902 mutex_destroy(&h->hash_mutexes[i]);
903 #if defined(_KERNEL)
904 /*
905 * Large allocations which do not require contiguous pages
906 * should be using vmem_free() in the linux kernel
907 */
908 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
909 #else
910 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
911 #endif
912 kmem_cache_destroy(dbuf_kmem_cache);
913 taskq_destroy(dbu_evict_taskq);
914
915 mutex_enter(&dbuf_evict_lock);
916 dbuf_evict_thread_exit = B_TRUE;
917 while (dbuf_evict_thread_exit) {
918 cv_signal(&dbuf_evict_cv);
919 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
920 }
921 mutex_exit(&dbuf_evict_lock);
922
923 mutex_destroy(&dbuf_evict_lock);
924 cv_destroy(&dbuf_evict_cv);
925
926 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
927 zfs_refcount_destroy(&dbuf_caches[dcs].size);
928 multilist_destroy(dbuf_caches[dcs].cache);
929 }
930
931 if (dbuf_ksp != NULL) {
932 kstat_delete(dbuf_ksp);
933 dbuf_ksp = NULL;
934 }
935 }
936
937 /*
938 * Other stuff.
939 */
940
941 #ifdef ZFS_DEBUG
942 static void
943 dbuf_verify(dmu_buf_impl_t *db)
944 {
945 dnode_t *dn;
946 dbuf_dirty_record_t *dr;
947
948 ASSERT(MUTEX_HELD(&db->db_mtx));
949
950 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
951 return;
952
953 ASSERT(db->db_objset != NULL);
954 DB_DNODE_ENTER(db);
955 dn = DB_DNODE(db);
956 if (dn == NULL) {
957 ASSERT(db->db_parent == NULL);
958 ASSERT(db->db_blkptr == NULL);
959 } else {
960 ASSERT3U(db->db.db_object, ==, dn->dn_object);
961 ASSERT3P(db->db_objset, ==, dn->dn_objset);
962 ASSERT3U(db->db_level, <, dn->dn_nlevels);
963 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
964 db->db_blkid == DMU_SPILL_BLKID ||
965 !avl_is_empty(&dn->dn_dbufs));
966 }
967 if (db->db_blkid == DMU_BONUS_BLKID) {
968 ASSERT(dn != NULL);
969 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
970 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
971 } else if (db->db_blkid == DMU_SPILL_BLKID) {
972 ASSERT(dn != NULL);
973 ASSERT0(db->db.db_offset);
974 } else {
975 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
976 }
977
978 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
979 ASSERT(dr->dr_dbuf == db);
980
981 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
982 ASSERT(dr->dr_dbuf == db);
983
984 /*
985 * We can't assert that db_size matches dn_datablksz because it
986 * can be momentarily different when another thread is doing
987 * dnode_set_blksz().
988 */
989 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
990 dr = db->db_data_pending;
991 /*
992 * It should only be modified in syncing context, so
993 * make sure we only have one copy of the data.
994 */
995 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
996 }
997
998 /* verify db->db_blkptr */
999 if (db->db_blkptr) {
1000 if (db->db_parent == dn->dn_dbuf) {
1001 /* db is pointed to by the dnode */
1002 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1003 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1004 ASSERT(db->db_parent == NULL);
1005 else
1006 ASSERT(db->db_parent != NULL);
1007 if (db->db_blkid != DMU_SPILL_BLKID)
1008 ASSERT3P(db->db_blkptr, ==,
1009 &dn->dn_phys->dn_blkptr[db->db_blkid]);
1010 } else {
1011 /* db is pointed to by an indirect block */
1012 ASSERTV(int epb = db->db_parent->db.db_size >>
1013 SPA_BLKPTRSHIFT);
1014 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1015 ASSERT3U(db->db_parent->db.db_object, ==,
1016 db->db.db_object);
1017 /*
1018 * dnode_grow_indblksz() can make this fail if we don't
1019 * have the parent's rwlock. XXX indblksz no longer
1020 * grows. safe to do this now?
1021 */
1022 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1023 ASSERT3P(db->db_blkptr, ==,
1024 ((blkptr_t *)db->db_parent->db.db_data +
1025 db->db_blkid % epb));
1026 }
1027 }
1028 }
1029 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1030 (db->db_buf == NULL || db->db_buf->b_data) &&
1031 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1032 db->db_state != DB_FILL && !dn->dn_free_txg) {
1033 /*
1034  		 * If the blkptr isn't set but the dbuf has nonzero data,
1035 * it had better be dirty, otherwise we'll lose that
1036 * data when we evict this buffer.
1037 *
1038 * There is an exception to this rule for indirect blocks; in
1039 * this case, if the indirect block is a hole, we fill in a few
1040 * fields on each of the child blocks (importantly, birth time)
1041 * to prevent hole birth times from being lost when you
1042 * partially fill in a hole.
1043 */
1044 if (db->db_dirtycnt == 0) {
1045 if (db->db_level == 0) {
1046 uint64_t *buf = db->db.db_data;
1047 int i;
1048
1049 for (i = 0; i < db->db.db_size >> 3; i++) {
1050 ASSERT(buf[i] == 0);
1051 }
1052 } else {
1053 blkptr_t *bps = db->db.db_data;
1054 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1055 db->db.db_size);
1056 /*
1057 * We want to verify that all the blkptrs in the
1058 * indirect block are holes, but we may have
1059 * automatically set up a few fields for them.
1060 * We iterate through each blkptr and verify
1061 * they only have those fields set.
1062 */
1063 for (int i = 0;
1064 i < db->db.db_size / sizeof (blkptr_t);
1065 i++) {
1066 blkptr_t *bp = &bps[i];
1067 ASSERT(ZIO_CHECKSUM_IS_ZERO(
1068 &bp->blk_cksum));
1069 ASSERT(
1070 DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1071 DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1072 DVA_IS_EMPTY(&bp->blk_dva[2]));
1073 ASSERT0(bp->blk_fill);
1074 ASSERT0(bp->blk_pad[0]);
1075 ASSERT0(bp->blk_pad[1]);
1076 ASSERT(!BP_IS_EMBEDDED(bp));
1077 ASSERT(BP_IS_HOLE(bp));
1078 ASSERT0(bp->blk_phys_birth);
1079 }
1080 }
1081 }
1082 }
1083 DB_DNODE_EXIT(db);
1084 }
1085 #endif
1086
1087 static void
1088 dbuf_clear_data(dmu_buf_impl_t *db)
1089 {
1090 ASSERT(MUTEX_HELD(&db->db_mtx));
1091 dbuf_evict_user(db);
1092 ASSERT3P(db->db_buf, ==, NULL);
1093 db->db.db_data = NULL;
1094 if (db->db_state != DB_NOFILL)
1095 db->db_state = DB_UNCACHED;
1096 }
1097
1098 static void
1099 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1100 {
1101 ASSERT(MUTEX_HELD(&db->db_mtx));
1102 ASSERT(buf != NULL);
1103
1104 db->db_buf = buf;
1105 ASSERT(buf->b_data != NULL);
1106 db->db.db_data = buf->b_data;
1107 }
1108
1109 /*
1110 * Loan out an arc_buf for read. Return the loaned arc_buf.
1111 */
1112 arc_buf_t *
1113 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1114 {
1115 arc_buf_t *abuf;
1116
1117 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1118 mutex_enter(&db->db_mtx);
1119 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1120 int blksz = db->db.db_size;
1121 spa_t *spa = db->db_objset->os_spa;
1122
1123 mutex_exit(&db->db_mtx);
1124 abuf = arc_loan_buf(spa, B_FALSE, blksz);
1125 bcopy(db->db.db_data, abuf->b_data, blksz);
1126 } else {
1127 abuf = db->db_buf;
1128 arc_loan_inuse_buf(abuf, db);
1129 db->db_buf = NULL;
1130 dbuf_clear_data(db);
1131 mutex_exit(&db->db_mtx);
1132 }
1133 return (abuf);
1134 }
1135
1136 /*
1137 * Calculate which level n block references the data at the level 0 offset
1138 * provided.
1139 */
1140 uint64_t
1141 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1142 {
1143 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1144 /*
1145 * The level n blkid is equal to the level 0 blkid divided by
1146 * the number of level 0s in a level n block.
1147 *
1148 * The level 0 blkid is offset >> datablkshift =
1149 * offset / 2^datablkshift.
1150 *
1151 * The number of level 0s in a level n is the number of block
1152 * pointers in an indirect block, raised to the power of level.
1153 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1154 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1155 *
1156 * Thus, the level n blkid is: offset /
1157 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1158 * = offset / 2^(datablkshift + level *
1159 * (indblkshift - SPA_BLKPTRSHIFT))
1160 * = offset >> (datablkshift + level *
1161 * (indblkshift - SPA_BLKPTRSHIFT))
1162 */
1163
1164 const unsigned exp = dn->dn_datablkshift +
1165 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1166
1167 if (exp >= 8 * sizeof (offset)) {
1168 /* This only happens on the highest indirection level */
1169 ASSERT3U(level, ==, dn->dn_nlevels - 1);
1170 return (0);
1171 }
1172
1173 ASSERT3U(exp, <, 8 * sizeof (offset));
1174
1175 return (offset >> exp);
1176 } else {
1177 ASSERT3U(offset, <, dn->dn_datablksz);
1178 return (0);
1179 }
1180 }
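/*
 * Worked example, assuming the common case of 128 KiB data blocks
 * (datablkshift = 17) and 128 KiB indirect blocks (indblkshift = 17):
 * each indirect block holds 2^(17 - SPA_BLKPTRSHIFT) = 1024 block
 * pointers, so the level-1 blkid is offset >> (17 + 10). An offset of
 * 1 GiB (2^30) therefore maps to level-1 blkid 2^30 >> 27 = 8.
 */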
1181
1182 /*
1183 * This function is used to lock the parent of the provided dbuf. This should be
1184 * used when modifying or reading db_blkptr.
1185 */
1186 db_lock_type_t
1187 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
1188 {
1189 enum db_lock_type ret = DLT_NONE;
1190 if (db->db_parent != NULL) {
1191 rw_enter(&db->db_parent->db_rwlock, rw);
1192 ret = DLT_PARENT;
1193 } else if (dmu_objset_ds(db->db_objset) != NULL) {
1194 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1195 tag);
1196 ret = DLT_OBJSET;
1197 }
1198 /*
1199 * We only return a DLT_NONE lock when it's the top-most indirect block
1200 * of the meta-dnode of the MOS.
1201 */
1202 return (ret);
1203 }
1204
1205 /*
1206 * We need to pass the lock type in because it's possible that the block will
1207  * move from being the topmost indirect block in a dnode (and thus having no
1208  * parent) to no longer being the topmost after an indirection increase. This would cause a
1209 * panic if we didn't pass the lock type in.
1210 */
1211 void
1212 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
1213 {
1214 if (type == DLT_PARENT)
1215 rw_exit(&db->db_parent->db_rwlock);
1216 else if (type == DLT_OBJSET)
1217 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1218 }
1219
1220 static void
1221 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1222 arc_buf_t *buf, void *vdb)
1223 {
1224 dmu_buf_impl_t *db = vdb;
1225
1226 mutex_enter(&db->db_mtx);
1227 ASSERT3U(db->db_state, ==, DB_READ);
1228 /*
1229 * All reads are synchronous, so we must have a hold on the dbuf
1230 */
1231 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1232 ASSERT(db->db_buf == NULL);
1233 ASSERT(db->db.db_data == NULL);
1234 if (buf == NULL) {
1235 /* i/o error */
1236 ASSERT(zio == NULL || zio->io_error != 0);
1237 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1238 ASSERT3P(db->db_buf, ==, NULL);
1239 db->db_state = DB_UNCACHED;
1240 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1241 /* freed in flight */
1242 ASSERT(zio == NULL || zio->io_error == 0);
1243 arc_release(buf, db);
1244 bzero(buf->b_data, db->db.db_size);
1245 arc_buf_freeze(buf);
1246 db->db_freed_in_flight = FALSE;
1247 dbuf_set_data(db, buf);
1248 db->db_state = DB_CACHED;
1249 } else {
1250 /* success */
1251 ASSERT(zio == NULL || zio->io_error == 0);
1252 dbuf_set_data(db, buf);
1253 db->db_state = DB_CACHED;
1254 }
1255 cv_broadcast(&db->db_changed);
1256 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1257 }
1258
1259
1260 /*
1261 * This function ensures that, when doing a decrypting read of a block,
1262 * we make sure we have decrypted the dnode associated with it. We must do
1263 * this so that we ensure we are fully authenticating the checksum-of-MACs
1264 * tree from the root of the objset down to this block. Indirect blocks are
1265 * always verified against their secure checksum-of-MACs assuming that the
1266 * dnode containing them is correct. Now that we are doing a decrypting read,
1267 * we can be sure that the key is loaded and verify that assumption. This is
1268 * especially important considering that we always read encrypted dnode
1269 * blocks as raw data (without verifying their MACs) to start, and
1270 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1271 */
1272 static int
1273 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1274 {
1275 int err = 0;
1276 objset_t *os = db->db_objset;
1277 arc_buf_t *dnode_abuf;
1278 dnode_t *dn;
1279 zbookmark_phys_t zb;
1280
1281 ASSERT(MUTEX_HELD(&db->db_mtx));
1282
1283 if (!os->os_encrypted || os->os_raw_receive ||
1284 (flags & DB_RF_NO_DECRYPT) != 0)
1285 return (0);
1286
1287 DB_DNODE_ENTER(db);
1288 dn = DB_DNODE(db);
1289 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1290
1291 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1292 DB_DNODE_EXIT(db);
1293 return (0);
1294 }
1295
1296 SET_BOOKMARK(&zb, dmu_objset_id(os),
1297 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1298 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1299
1300 /*
1301 * An error code of EACCES tells us that the key is still not
1302 * available. This is ok if we are only reading authenticated
1303 * (and therefore non-encrypted) blocks.
1304 */
1305 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1306 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1307 (db->db_blkid == DMU_BONUS_BLKID &&
1308 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1309 err = 0;
1310
1311 DB_DNODE_EXIT(db);
1312
1313 return (err);
1314 }
1315
1316 /*
1317 * Drops db_mtx and the parent lock specified by dblt and tag before
1318 * returning.
1319 */
1320 static int
1321 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1322 db_lock_type_t dblt, void *tag)
1323 {
1324 dnode_t *dn;
1325 zbookmark_phys_t zb;
1326 uint32_t aflags = ARC_FLAG_NOWAIT;
1327 int err, zio_flags = 0;
1328
1329 DB_DNODE_ENTER(db);
1330 dn = DB_DNODE(db);
1331 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1332 ASSERT(MUTEX_HELD(&db->db_mtx));
1333 ASSERT(db->db_state == DB_UNCACHED);
1334 ASSERT(db->db_buf == NULL);
1335 ASSERT(db->db_parent == NULL ||
1336 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1337
1338 if (db->db_blkid == DMU_BONUS_BLKID) {
1339 /*
1340 * The bonus length stored in the dnode may be less than
1341 * the maximum available space in the bonus buffer.
1342 */
1343 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1344 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1345
1346 /* if the underlying dnode block is encrypted, decrypt it */
1347 err = dbuf_read_verify_dnode_crypt(db, flags);
1348 if (err != 0) {
1349 DB_DNODE_EXIT(db);
1350 mutex_exit(&db->db_mtx);
1351 return (err);
1352 }
1353
1354 ASSERT3U(bonuslen, <=, db->db.db_size);
1355 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1356 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1357 if (bonuslen < max_bonuslen)
1358 bzero(db->db.db_data, max_bonuslen);
1359 if (bonuslen)
1360 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
1361 DB_DNODE_EXIT(db);
1362 db->db_state = DB_CACHED;
1363 mutex_exit(&db->db_mtx);
1364 dmu_buf_unlock_parent(db, dblt, tag);
1365 return (0);
1366 }
1367
1368 /*
1369 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1370 * processes the delete record and clears the bp while we are waiting
1371 * for the dn_mtx (resulting in a "no" from block_freed).
1372 */
1373 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
1374 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
1375 BP_IS_HOLE(db->db_blkptr)))) {
1376 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1377
1378 dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
1379 db->db.db_size));
1380 bzero(db->db.db_data, db->db.db_size);
1381
1382 if (db->db_blkptr != NULL && db->db_level > 0 &&
1383 BP_IS_HOLE(db->db_blkptr) &&
1384 db->db_blkptr->blk_birth != 0) {
1385 blkptr_t *bps = db->db.db_data;
1386 for (int i = 0; i < ((1 <<
1387 DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
1388 i++) {
1389 blkptr_t *bp = &bps[i];
1390 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
1391 1 << dn->dn_indblkshift);
1392 BP_SET_LSIZE(bp,
1393 BP_GET_LEVEL(db->db_blkptr) == 1 ?
1394 dn->dn_datablksz :
1395 BP_GET_LSIZE(db->db_blkptr));
1396 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
1397 BP_SET_LEVEL(bp,
1398 BP_GET_LEVEL(db->db_blkptr) - 1);
1399 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
1400 }
1401 }
1402 DB_DNODE_EXIT(db);
1403 db->db_state = DB_CACHED;
1404 mutex_exit(&db->db_mtx);
1405 dmu_buf_unlock_parent(db, dblt, tag);
1406 return (0);
1407 }
1408
1409 /*
1410 * Any attempt to read a redacted block should result in an error. This
1411 * will never happen under normal conditions, but can be useful for
1412 * debugging purposes.
1413 */
1414 if (BP_IS_REDACTED(db->db_blkptr)) {
1415 ASSERT(dsl_dataset_feature_is_active(
1416 db->db_objset->os_dsl_dataset,
1417 SPA_FEATURE_REDACTED_DATASETS));
1418 DB_DNODE_EXIT(db);
1419 mutex_exit(&db->db_mtx);
1420 return (SET_ERROR(EIO));
1421 }
1422
1423
1424 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1425 db->db.db_object, db->db_level, db->db_blkid);
1426
1427 /*
1428 * All bps of an encrypted os should have the encryption bit set.
1429 * If this is not true it indicates tampering and we report an error.
1430 */
1431 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
1432 spa_log_error(db->db_objset->os_spa, &zb);
1433 zfs_panic_recover("unencrypted block in encrypted "
1434 "object set %llu", dmu_objset_id(db->db_objset));
1435 DB_DNODE_EXIT(db);
1436 mutex_exit(&db->db_mtx);
1437 dmu_buf_unlock_parent(db, dblt, tag);
1438 return (SET_ERROR(EIO));
1439 }
1440
1441 err = dbuf_read_verify_dnode_crypt(db, flags);
1442 if (err != 0) {
1443 DB_DNODE_EXIT(db);
1444 dmu_buf_unlock_parent(db, dblt, tag);
1445 mutex_exit(&db->db_mtx);
1446 return (err);
1447 }
1448
1449 DB_DNODE_EXIT(db);
1450
1451 db->db_state = DB_READ;
1452 mutex_exit(&db->db_mtx);
1453
1454 if (DBUF_IS_L2CACHEABLE(db))
1455 aflags |= ARC_FLAG_L2CACHE;
1456
1457 dbuf_add_ref(db, NULL);
1458
1459 zio_flags = (flags & DB_RF_CANFAIL) ?
1460 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1461
1462 if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1463 zio_flags |= ZIO_FLAG_RAW;
1464 /*
1465 * The zio layer will copy the provided blkptr later, but we need to
1466  	 * do this now so that we can release the parent's rwlock, and so that
1467  	 * if dbuf_read_done is called synchronously (on
1468 * an l1 cache hit) we don't acquire the db_mtx while holding the
1469 * parent's rwlock, which would be a lock ordering violation.
1470 */
1471 blkptr_t bp = *db->db_blkptr;
1472 dmu_buf_unlock_parent(db, dblt, tag);
1473 (void) arc_read(zio, db->db_objset->os_spa, &bp,
1474 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1475 &aflags, &zb);
1476 return (err);
1477 }
1478
1479 /*
1480 * This is our just-in-time copy function. It makes a copy of buffers that
1481 * have been modified in a previous transaction group before we access them in
1482 * the current active group.
1483 *
1484 * This function is used in three places: when we are dirtying a buffer for the
1485 * first time in a txg, when we are freeing a range in a dnode that includes
1486 * this buffer, and when we are accessing a buffer which was received compressed
1487 * and later referenced in a WRITE_BYREF record.
1488 *
1489 * Note that when we are called from dbuf_free_range() we do not put a hold on
1490 * the buffer, we just traverse the active dbuf list for the dnode.
1491 */
1492 static void
1493 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1494 {
1495 dbuf_dirty_record_t *dr = db->db_last_dirty;
1496
1497 ASSERT(MUTEX_HELD(&db->db_mtx));
1498 ASSERT(db->db.db_data != NULL);
1499 ASSERT(db->db_level == 0);
1500 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1501
1502 if (dr == NULL ||
1503 (dr->dt.dl.dr_data !=
1504 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1505 return;
1506
1507 /*
1508 * If the last dirty record for this dbuf has not yet synced
1509  	 * and it is referencing the dbuf data, either:
1510  	 *	reset the reference to point to a new copy,
1511  	 * or (if there are no active holders)
1512 * just null out the current db_data pointer.
1513 */
1514 ASSERT3U(dr->dr_txg, >=, txg - 2);
1515 if (db->db_blkid == DMU_BONUS_BLKID) {
1516 dnode_t *dn = DB_DNODE(db);
1517 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1518 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1519 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1520 bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
1521 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1522 dnode_t *dn = DB_DNODE(db);
1523 int size = arc_buf_size(db->db_buf);
1524 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1525 spa_t *spa = db->db_objset->os_spa;
1526 enum zio_compress compress_type =
1527 arc_get_compression(db->db_buf);
1528
1529 if (arc_is_encrypted(db->db_buf)) {
1530 boolean_t byteorder;
1531 uint8_t salt[ZIO_DATA_SALT_LEN];
1532 uint8_t iv[ZIO_DATA_IV_LEN];
1533 uint8_t mac[ZIO_DATA_MAC_LEN];
1534
1535 arc_get_raw_params(db->db_buf, &byteorder, salt,
1536 iv, mac);
1537 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1538 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1539 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1540 compress_type);
1541 } else if (compress_type != ZIO_COMPRESS_OFF) {
1542 ASSERT3U(type, ==, ARC_BUFC_DATA);
1543 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1544 size, arc_buf_lsize(db->db_buf), compress_type);
1545 } else {
1546 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1547 }
1548 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
1549 } else {
1550 db->db_buf = NULL;
1551 dbuf_clear_data(db);
1552 }
1553 }
1554
1555 int
1556 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1557 {
1558 int err = 0;
1559 boolean_t prefetch;
1560 dnode_t *dn;
1561
1562 /*
1563 * We don't have to hold the mutex to check db_state because it
1564 * can't be freed while we have a hold on the buffer.
1565 */
1566 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1567
1568 if (db->db_state == DB_NOFILL)
1569 return (SET_ERROR(EIO));
1570
1571 DB_DNODE_ENTER(db);
1572 dn = DB_DNODE(db);
1573
1574 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1575 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
1576 DBUF_IS_CACHEABLE(db);
1577
1578 mutex_enter(&db->db_mtx);
1579 if (db->db_state == DB_CACHED) {
1580 spa_t *spa = dn->dn_objset->os_spa;
1581
1582 /*
1583 * Ensure that this block's dnode has been decrypted if
1584 * the caller has requested decrypted data.
1585 */
1586 err = dbuf_read_verify_dnode_crypt(db, flags);
1587
1588 /*
1589 * If the arc buf is compressed or encrypted and the caller
1590 * requested uncompressed data, we need to untransform it
1591 * before returning. We also call arc_untransform() on any
1592 * unauthenticated blocks, which will verify their MAC if
1593 * the key is now available.
1594 */
1595 if (err == 0 && db->db_buf != NULL &&
1596 (flags & DB_RF_NO_DECRYPT) == 0 &&
1597 (arc_is_encrypted(db->db_buf) ||
1598 arc_is_unauthenticated(db->db_buf) ||
1599 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1600 zbookmark_phys_t zb;
1601
1602 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1603 db->db.db_object, db->db_level, db->db_blkid);
1604 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1605 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1606 dbuf_set_data(db, db->db_buf);
1607 }
1608 mutex_exit(&db->db_mtx);
1609 if (err == 0 && prefetch) {
1610 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1611 flags & DB_RF_HAVESTRUCT);
1612 }
1613 DB_DNODE_EXIT(db);
1614 DBUF_STAT_BUMP(hash_hits);
1615 } else if (db->db_state == DB_UNCACHED) {
1616 spa_t *spa = dn->dn_objset->os_spa;
1617 boolean_t need_wait = B_FALSE;
1618
1619 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1620
1621 if (zio == NULL &&
1622 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1623 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1624 need_wait = B_TRUE;
1625 }
1626 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1627 /*
1628 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1629 * for us
1630 */
1631 if (!err && prefetch) {
1632 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1633 flags & DB_RF_HAVESTRUCT);
1634 }
1635
1636 DB_DNODE_EXIT(db);
1637 DBUF_STAT_BUMP(hash_misses);
1638
1639 /*
1640 * If we created a zio_root we must execute it to avoid
1641 * leaking it, even if it isn't attached to any work due
1642 * to an error in dbuf_read_impl().
1643 */
1644 if (need_wait) {
1645 if (err == 0)
1646 err = zio_wait(zio);
1647 else
1648 VERIFY0(zio_wait(zio));
1649 }
1650 } else {
1651 /*
1652 * Another reader came in while the dbuf was in flight
1653 * between UNCACHED and CACHED. Either a writer will finish
1654 * writing the buffer (sending the dbuf to CACHED) or the
1655 * first reader's request will reach the read_done callback
1656 * and send the dbuf to CACHED. Otherwise, a failure
1657 * occurred and the dbuf went to UNCACHED.
1658 */
1659 mutex_exit(&db->db_mtx);
1660 if (prefetch) {
1661 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1662 flags & DB_RF_HAVESTRUCT);
1663 }
1664 DB_DNODE_EXIT(db);
1665 DBUF_STAT_BUMP(hash_misses);
1666
1667 /* Skip the wait per the caller's request. */
1668 mutex_enter(&db->db_mtx);
1669 if ((flags & DB_RF_NEVERWAIT) == 0) {
1670 while (db->db_state == DB_READ ||
1671 db->db_state == DB_FILL) {
1672 ASSERT(db->db_state == DB_READ ||
1673 (flags & DB_RF_HAVESTRUCT) == 0);
1674 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1675 db, zio_t *, zio);
1676 cv_wait(&db->db_changed, &db->db_mtx);
1677 }
1678 if (db->db_state == DB_UNCACHED)
1679 err = SET_ERROR(EIO);
1680 }
1681 mutex_exit(&db->db_mtx);
1682 }
1683
1684 return (err);
1685 }
1686
1687 static void
1688 dbuf_noread(dmu_buf_impl_t *db)
1689 {
1690 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1691 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1692 mutex_enter(&db->db_mtx);
1693 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1694 cv_wait(&db->db_changed, &db->db_mtx);
1695 if (db->db_state == DB_UNCACHED) {
1696 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1697 spa_t *spa = db->db_objset->os_spa;
1698
1699 ASSERT(db->db_buf == NULL);
1700 ASSERT(db->db.db_data == NULL);
1701 dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
1702 db->db_state = DB_FILL;
1703 } else if (db->db_state == DB_NOFILL) {
1704 dbuf_clear_data(db);
1705 } else {
1706 ASSERT3U(db->db_state, ==, DB_CACHED);
1707 }
1708 mutex_exit(&db->db_mtx);
1709 }
1710
1711 void
1712 dbuf_unoverride(dbuf_dirty_record_t *dr)
1713 {
1714 dmu_buf_impl_t *db = dr->dr_dbuf;
1715 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1716 uint64_t txg = dr->dr_txg;
1717
1718 ASSERT(MUTEX_HELD(&db->db_mtx));
1719 /*
1720 * This assert is valid because dmu_sync() expects to be called by
1721 * a zilog's get_data while holding a range lock. This call only
1722 * comes from dbuf_dirty() callers who must also hold a range lock.
1723 */
1724 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1725 ASSERT(db->db_level == 0);
1726
1727 if (db->db_blkid == DMU_BONUS_BLKID ||
1728 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1729 return;
1730
1731 ASSERT(db->db_data_pending != dr);
1732
1733 /* free this block */
1734 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1735 zio_free(db->db_objset->os_spa, txg, bp);
1736
1737 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1738 dr->dt.dl.dr_nopwrite = B_FALSE;
1739 dr->dt.dl.dr_has_raw_params = B_FALSE;
1740
1741 /*
1742 * Release the already-written buffer, so we leave it in
1743 * a consistent dirty state. Note that all callers are
1744 * modifying the buffer, so they will immediately do
1745 * another (redundant) arc_release(). Therefore, leave
1746 * the buf thawed to save the effort of freezing &
1747 * immediately re-thawing it.
1748 */
1749 arc_release(dr->dt.dl.dr_data, db);
1750 }
1751
1752 /*
1753 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1754 * data blocks in the free range, so that any future readers will find
1755 * empty blocks.
1756 */
1757 void
1758 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1759 dmu_tx_t *tx)
1760 {
1761 dmu_buf_impl_t *db_search;
1762 dmu_buf_impl_t *db, *db_next;
1763 uint64_t txg = tx->tx_txg;
1764 avl_index_t where;
1765
1766 if (end_blkid > dn->dn_maxblkid &&
1767 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1768 end_blkid = dn->dn_maxblkid;
1769 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
1770
1771 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1772 db_search->db_level = 0;
1773 db_search->db_blkid = start_blkid;
1774 db_search->db_state = DB_SEARCH;
1775
1776 mutex_enter(&dn->dn_dbufs_mtx);
1777 db = avl_find(&dn->dn_dbufs, db_search, &where);
1778 ASSERT3P(db, ==, NULL);
1779
1780 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1781
1782 for (; db != NULL; db = db_next) {
1783 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1784 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1785
1786 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1787 break;
1788 }
1789 ASSERT3U(db->db_blkid, >=, start_blkid);
1790
1791 /* found a level 0 buffer in the range */
1792 mutex_enter(&db->db_mtx);
1793 if (dbuf_undirty(db, tx)) {
1794 /* mutex has been dropped and dbuf destroyed */
1795 continue;
1796 }
1797
1798 if (db->db_state == DB_UNCACHED ||
1799 db->db_state == DB_NOFILL ||
1800 db->db_state == DB_EVICTING) {
1801 ASSERT(db->db.db_data == NULL);
1802 mutex_exit(&db->db_mtx);
1803 continue;
1804 }
1805 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1806 /* will be handled in dbuf_read_done or dbuf_rele */
1807 db->db_freed_in_flight = TRUE;
1808 mutex_exit(&db->db_mtx);
1809 continue;
1810 }
1811 if (zfs_refcount_count(&db->db_holds) == 0) {
1812 ASSERT(db->db_buf);
1813 dbuf_destroy(db);
1814 continue;
1815 }
1816 /* The dbuf is referenced */
1817
1818 if (db->db_last_dirty != NULL) {
1819 dbuf_dirty_record_t *dr = db->db_last_dirty;
1820
1821 if (dr->dr_txg == txg) {
1822 /*
1823 * This buffer is "in-use", re-adjust the file
1824 * size to reflect that this buffer may
1825 * contain new data when we sync.
1826 */
1827 if (db->db_blkid != DMU_SPILL_BLKID &&
1828 db->db_blkid > dn->dn_maxblkid)
1829 dn->dn_maxblkid = db->db_blkid;
1830 dbuf_unoverride(dr);
1831 } else {
1832 /*
1833 * This dbuf is not dirty in the open context.
1834 * Either uncache it (if it's not referenced in
1835 * the open context) or reset its contents to
1836 * empty.
1837 */
1838 dbuf_fix_old_data(db, txg);
1839 }
1840 }
1841 /* clear the contents if it's cached */
1842 if (db->db_state == DB_CACHED) {
1843 ASSERT(db->db.db_data != NULL);
1844 arc_release(db->db_buf, db);
1845 rw_enter(&db->db_rwlock, RW_WRITER);
1846 bzero(db->db.db_data, db->db.db_size);
1847 rw_exit(&db->db_rwlock);
1848 arc_buf_freeze(db->db_buf);
1849 }
1850
1851 mutex_exit(&db->db_mtx);
1852 }
1853
1854 kmem_free(db_search, sizeof (dmu_buf_impl_t));
1855 mutex_exit(&dn->dn_dbufs_mtx);
1856 }
1857
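/*
 * Resize the dbuf's data buffer to the given size in the given transaction:
 * dirty the buffer, allocate a new ARC buffer, copy the old contents over
 * (zeroing any newly added space), and update the dirty record's data
 * pointer and space accounting.
 */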
1858 void
1859 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1860 {
1861 arc_buf_t *buf, *obuf;
1862 int osize = db->db.db_size;
1863 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1864 dnode_t *dn;
1865
1866 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1867
1868 DB_DNODE_ENTER(db);
1869 dn = DB_DNODE(db);
1870
1871 /*
1872 * XXX we should be doing a dbuf_read, checking the return
1873 * value and returning that up to our callers
1874 */
1875 dmu_buf_will_dirty(&db->db, tx);
1876
1877 /* create the data buffer for the new block */
1878 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
1879
1880 /* copy old block data to the new block */
1881 obuf = db->db_buf;
1882 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1883 /* zero the remainder */
1884 if (size > osize)
1885 bzero((uint8_t *)buf->b_data + osize, size - osize);
1886
1887 mutex_enter(&db->db_mtx);
1888 dbuf_set_data(db, buf);
1889 arc_buf_destroy(obuf, db);
1890 db->db.db_size = size;
1891
1892 if (db->db_level == 0) {
1893 db->db_last_dirty->dt.dl.dr_data = buf;
1894 }
1895 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1896 ASSERT3U(db->db_last_dirty->dr_accounted, ==, osize);
1897 db->db_last_dirty->dr_accounted = size;
1898 mutex_exit(&db->db_mtx);
1899
1900 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
1901 DB_DNODE_EXIT(db);
1902 }
1903
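/*
 * Release the dbuf's ARC buffer in syncing context so that the block
 * pointer it was read from may be rewritten.
 */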
1904 void
1905 dbuf_release_bp(dmu_buf_impl_t *db)
1906 {
1907 ASSERTV(objset_t *os = db->db_objset);
1908
1909 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1910 ASSERT(arc_released(os->os_phys_buf) ||
1911 list_link_active(&os->os_dsl_dataset->ds_synced_link));
1912 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1913
1914 (void) arc_release(db->db_buf, db);
1915 }
1916
1917 /*
1918 * We already have a dirty record for this TXG, and we are being
1919 * dirtied again.
1920 */
1921 static void
1922 dbuf_redirty(dbuf_dirty_record_t *dr)
1923 {
1924 dmu_buf_impl_t *db = dr->dr_dbuf;
1925
1926 ASSERT(MUTEX_HELD(&db->db_mtx));
1927
1928 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1929 /*
1930 * If this buffer has already been written out,
1931 * we now need to reset its state.
1932 */
1933 dbuf_unoverride(dr);
1934 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1935 db->db_state != DB_NOFILL) {
1936 /* Already released on initial dirty, so just thaw. */
1937 ASSERT(arc_released(db->db_buf));
1938 arc_buf_thaw(db->db_buf);
1939 }
1940 }
1941 }
1942
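/*
 * Mark the dbuf dirty in the given transaction: create a dirty record for
 * this txg (or re-dirty the existing one), account for the dirty space,
 * and propagate the dirtiness up through the parent indirect dbuf (or the
 * dnode's dirty list) so the change reaches syncing context.
 */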
1943 dbuf_dirty_record_t *
1944 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1945 {
1946 dnode_t *dn;
1947 objset_t *os;
1948 dbuf_dirty_record_t **drp, *dr;
1949 int txgoff = tx->tx_txg & TXG_MASK;
1950 boolean_t drop_struct_rwlock = B_FALSE;
1951
1952 ASSERT(tx->tx_txg != 0);
1953 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1954 DMU_TX_DIRTY_BUF(tx, db);
1955
1956 DB_DNODE_ENTER(db);
1957 dn = DB_DNODE(db);
1958 /*
1959 * Shouldn't dirty a regular buffer in syncing context. Private
1960 * objects may be dirtied in syncing context, but only if they
1961 * were already pre-dirtied in open context.
1962 */
1963 #ifdef DEBUG
1964 if (dn->dn_objset->os_dsl_dataset != NULL) {
1965 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
1966 RW_READER, FTAG);
1967 }
1968 ASSERT(!dmu_tx_is_syncing(tx) ||
1969 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1970 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1971 dn->dn_objset->os_dsl_dataset == NULL);
1972 if (dn->dn_objset->os_dsl_dataset != NULL)
1973 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
1974 #endif
1975 /*
1976 * We make this assert for private objects as well, but after we
1977 * check if we're already dirty. They are allowed to re-dirty
1978 * in syncing context.
1979 */
1980 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1981 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1982 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1983
1984 mutex_enter(&db->db_mtx);
1985 /*
1986 * XXX make this true for indirects too? The problem is that
1987 * transactions created with dmu_tx_create_assigned() from
1988 * syncing context don't bother holding ahead.
1989 */
1990 ASSERT(db->db_level != 0 ||
1991 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1992 db->db_state == DB_NOFILL);
1993
1994 mutex_enter(&dn->dn_mtx);
1995 /*
1996 * Don't set dirtyctx to SYNC if we're just modifying this as we
1997 * initialize the objset.
1998 */
1999 if (dn->dn_dirtyctx == DN_UNDIRTIED) {
2000 if (dn->dn_objset->os_dsl_dataset != NULL) {
2001 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2002 RW_READER, FTAG);
2003 }
2004 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
2005 dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
2006 DN_DIRTY_SYNC : DN_DIRTY_OPEN);
2007 ASSERT(dn->dn_dirtyctx_firstset == NULL);
2008 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
2009 }
2010 if (dn->dn_objset->os_dsl_dataset != NULL) {
2011 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2012 FTAG);
2013 }
2014 }
2015
2016 if (tx->tx_txg > dn->dn_dirty_txg)
2017 dn->dn_dirty_txg = tx->tx_txg;
2018 mutex_exit(&dn->dn_mtx);
2019
2020 if (db->db_blkid == DMU_SPILL_BLKID)
2021 dn->dn_have_spill = B_TRUE;
2022
2023 /*
2024 * If this buffer is already dirty, we're done.
2025 */
2026 drp = &db->db_last_dirty;
2027 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
2028 db->db.db_object == DMU_META_DNODE_OBJECT);
2029 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
2030 drp = &dr->dr_next;
2031 if (dr && dr->dr_txg == tx->tx_txg) {
2032 DB_DNODE_EXIT(db);
2033
2034 dbuf_redirty(dr);
2035 mutex_exit(&db->db_mtx);
2036 return (dr);
2037 }
2038
2039 /*
2040 * Only valid if not already dirty.
2041 */
2042 ASSERT(dn->dn_object == 0 ||
2043 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2044 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2045
2046 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2047
2048 /*
2049 * We should only be dirtying in syncing context if it's the
2050 * mos or we're initializing the os or it's a special object.
2051 * However, we are allowed to dirty in syncing context provided
2052 * we already dirtied it in open context. Hence we must make
2053 * this assertion only if we're not already dirty.
2054 */
2055 os = dn->dn_objset;
2056 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2057 #ifdef DEBUG
2058 if (dn->dn_objset->os_dsl_dataset != NULL)
2059 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2060 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2061 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2062 if (dn->dn_objset->os_dsl_dataset != NULL)
2063 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2064 #endif
2065 ASSERT(db->db.db_size != 0);
2066
2067 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2068
2069 if (db->db_blkid != DMU_BONUS_BLKID) {
2070 dmu_objset_willuse_space(os, db->db.db_size, tx);
2071 }
2072
2073 /*
2074 * If this buffer is dirty in an old transaction group we need
2075 * to make a copy of it so that the changes we make in this
2076 * transaction group won't leak out when we sync the older txg.
2077 */
2078 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2079 list_link_init(&dr->dr_dirty_node);
2080 if (db->db_level == 0) {
2081 void *data_old = db->db_buf;
2082
2083 if (db->db_state != DB_NOFILL) {
2084 if (db->db_blkid == DMU_BONUS_BLKID) {
2085 dbuf_fix_old_data(db, tx->tx_txg);
2086 data_old = db->db.db_data;
2087 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2088 /*
2089 * Release the data buffer from the cache so
2090 * that we can modify it without impacting
2091 * possible other users of this cached data
2092 * block. Note that indirect blocks and
2093 * private objects are not released until the
2094 * syncing state (since they are only modified
2095 * then).
2096 */
2097 arc_release(db->db_buf, db);
2098 dbuf_fix_old_data(db, tx->tx_txg);
2099 data_old = db->db_buf;
2100 }
2101 ASSERT(data_old != NULL);
2102 }
2103 dr->dt.dl.dr_data = data_old;
2104 } else {
2105 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2106 list_create(&dr->dt.di.dr_children,
2107 sizeof (dbuf_dirty_record_t),
2108 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2109 }
2110 if (db->db_blkid != DMU_BONUS_BLKID)
2111 dr->dr_accounted = db->db.db_size;
2112 dr->dr_dbuf = db;
2113 dr->dr_txg = tx->tx_txg;
2114 dr->dr_next = *drp;
2115 *drp = dr;
2116
2117 /*
2118 * We could have been freed_in_flight between the dbuf_noread
2119 * and dbuf_dirty. We win, as though the dbuf_noread() had
2120 * happened after the free.
2121 */
2122 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2123 db->db_blkid != DMU_SPILL_BLKID) {
2124 mutex_enter(&dn->dn_mtx);
2125 if (dn->dn_free_ranges[txgoff] != NULL) {
2126 range_tree_clear(dn->dn_free_ranges[txgoff],
2127 db->db_blkid, 1);
2128 }
2129 mutex_exit(&dn->dn_mtx);
2130 db->db_freed_in_flight = FALSE;
2131 }
2132
2133 /*
2134 * This buffer is now part of this txg
2135 */
2136 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2137 db->db_dirtycnt += 1;
2138 ASSERT3U(db->db_dirtycnt, <=, 3);
2139
2140 mutex_exit(&db->db_mtx);
2141
2142 if (db->db_blkid == DMU_BONUS_BLKID ||
2143 db->db_blkid == DMU_SPILL_BLKID) {
2144 mutex_enter(&dn->dn_mtx);
2145 ASSERT(!list_link_active(&dr->dr_dirty_node));
2146 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2147 mutex_exit(&dn->dn_mtx);
2148 dnode_setdirty(dn, tx);
2149 DB_DNODE_EXIT(db);
2150 return (dr);
2151 }
2152
2153 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2154 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2155 drop_struct_rwlock = B_TRUE;
2156 }
2157
2158 /*
2159 * If we are overwriting a dedup BP, then unless it is snapshotted,
2160 * when we get to syncing context we will need to decrement its
2161 * refcount in the DDT. Prefetch the relevant DDT block so that
2162 * syncing context won't have to wait for the i/o.
2163 */
2164 if (db->db_blkptr != NULL) {
2165 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2166 ddt_prefetch(os->os_spa, db->db_blkptr);
2167 dmu_buf_unlock_parent(db, dblt, FTAG);
2168 }
2169
2170 /*
2171 * We need to hold the dn_struct_rwlock to make this assertion,
2172 * because it protects dn_phys / dn_next_nlevels from changing.
2173 */
2174 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2175 dn->dn_phys->dn_nlevels > db->db_level ||
2176 dn->dn_next_nlevels[txgoff] > db->db_level ||
2177 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2178 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2179
2180
2181 if (db->db_level == 0) {
2182 ASSERT(!db->db_objset->os_raw_receive ||
2183 dn->dn_maxblkid >= db->db_blkid);
2184 dnode_new_blkid(dn, db->db_blkid, tx,
2185 drop_struct_rwlock, B_FALSE);
2186 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2187 }
2188
2189 if (db->db_level+1 < dn->dn_nlevels) {
2190 dmu_buf_impl_t *parent = db->db_parent;
2191 dbuf_dirty_record_t *di;
2192 int parent_held = FALSE;
2193
2194 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2195 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2196 parent = dbuf_hold_level(dn, db->db_level + 1,
2197 db->db_blkid >> epbs, FTAG);
2198 ASSERT(parent != NULL);
2199 parent_held = TRUE;
2200 }
2201 if (drop_struct_rwlock)
2202 rw_exit(&dn->dn_struct_rwlock);
2203 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2204 di = dbuf_dirty(parent, tx);
2205 if (parent_held)
2206 dbuf_rele(parent, FTAG);
2207
2208 mutex_enter(&db->db_mtx);
2209 /*
2210 * Since we've dropped the mutex, it's possible that
2211 * dbuf_undirty() might have changed this out from under us.
2212 */
2213 if (db->db_last_dirty == dr ||
2214 dn->dn_object == DMU_META_DNODE_OBJECT) {
2215 mutex_enter(&di->dt.di.dr_mtx);
2216 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2217 ASSERT(!list_link_active(&dr->dr_dirty_node));
2218 list_insert_tail(&di->dt.di.dr_children, dr);
2219 mutex_exit(&di->dt.di.dr_mtx);
2220 dr->dr_parent = di;
2221 }
2222 mutex_exit(&db->db_mtx);
2223 } else {
2224 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2225 ASSERT(db->db_blkid < dn->dn_nblkptr);
2226 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2227 mutex_enter(&dn->dn_mtx);
2228 ASSERT(!list_link_active(&dr->dr_dirty_node));
2229 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2230 mutex_exit(&dn->dn_mtx);
2231 if (drop_struct_rwlock)
2232 rw_exit(&dn->dn_struct_rwlock);
2233 }
2234
2235 dnode_setdirty(dn, tx);
2236 DB_DNODE_EXIT(db);
2237 return (dr);
2238 }
2239
2240 /*
2241 * Undirty a buffer in the transaction group referenced by the given
2242 * transaction. Return whether this evicted the dbuf.
2243 */
2244 static boolean_t
2245 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2246 {
2247 dnode_t *dn;
2248 uint64_t txg = tx->tx_txg;
2249 dbuf_dirty_record_t *dr, **drp;
2250
2251 ASSERT(txg != 0);
2252
2253 /*
2254 * Due to our use of dn_nlevels below, this can only be called
2255 * in open context, unless we are operating on the MOS.
2256 * From syncing context, dn_nlevels may be different from the
2257 * dn_nlevels used when dbuf was dirtied.
2258 */
2259 ASSERT(db->db_objset ==
2260 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2261 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2262 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2263 ASSERT0(db->db_level);
2264 ASSERT(MUTEX_HELD(&db->db_mtx));
2265
2266 /*
2267 * If this buffer is not dirty, we're done.
2268 */
2269 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
2270 if (dr->dr_txg <= txg)
2271 break;
2272 if (dr == NULL || dr->dr_txg < txg)
2273 return (B_FALSE);
2274 ASSERT(dr->dr_txg == txg);
2275 ASSERT(dr->dr_dbuf == db);
2276
2277 DB_DNODE_ENTER(db);
2278 dn = DB_DNODE(db);
2279
2280 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2281
2282 ASSERT(db->db.db_size != 0);
2283
2284 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2285 dr->dr_accounted, txg);
2286
2287 *drp = dr->dr_next;
2288
2289 /*
2290 * Note that there are three places in dbuf_dirty()
2291 * where this dirty record may be put on a list.
2292 * Make sure to do a list_remove corresponding to
2293 * every one of those list_insert calls.
2294 */
2295 if (dr->dr_parent) {
2296 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2297 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2298 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2299 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2300 db->db_level + 1 == dn->dn_nlevels) {
2301 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2302 mutex_enter(&dn->dn_mtx);
2303 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2304 mutex_exit(&dn->dn_mtx);
2305 }
2306 DB_DNODE_EXIT(db);
2307
2308 if (db->db_state != DB_NOFILL) {
2309 dbuf_unoverride(dr);
2310
2311 ASSERT(db->db_buf != NULL);
2312 ASSERT(dr->dt.dl.dr_data != NULL);
2313 if (dr->dt.dl.dr_data != db->db_buf)
2314 arc_buf_destroy(dr->dt.dl.dr_data, db);
2315 }
2316
2317 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2318
2319 ASSERT(db->db_dirtycnt > 0);
2320 db->db_dirtycnt -= 1;
2321
2322 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2323 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2324 dbuf_destroy(db);
2325 return (B_TRUE);
2326 }
2327
2328 return (B_FALSE);
2329 }
2330
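/*
 * Common implementation for dmu_buf_will_dirty() and
 * dmu_buf_set_crypt_params(): if the buffer is already dirty and cached
 * in this txg just re-dirty it, otherwise read it in with the given flags
 * and then dirty it.
 */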
2331 static void
2332 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2333 {
2334 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2335
2336 ASSERT(tx->tx_txg != 0);
2337 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2338
2339 /*
2340 * Quick check for dirtiness. For already dirty blocks, this
2341 * reduces runtime of this function by >90%, and overall performance
2342 * by 50% for some workloads (e.g. file deletion with indirect blocks
2343 * cached).
2344 */
2345 mutex_enter(&db->db_mtx);
2346
2347 dbuf_dirty_record_t *dr;
2348 for (dr = db->db_last_dirty;
2349 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
2350 /*
2351 * It's possible that it is already dirty but not cached,
2352 * because there are some calls to dbuf_dirty() that don't
2353 * go through dmu_buf_will_dirty().
2354 */
2355 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
2356 /* This dbuf is already dirty and cached. */
2357 dbuf_redirty(dr);
2358 mutex_exit(&db->db_mtx);
2359 return;
2360 }
2361 }
2362 mutex_exit(&db->db_mtx);
2363
2364 DB_DNODE_ENTER(db);
2365 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2366 flags |= DB_RF_HAVESTRUCT;
2367 DB_DNODE_EXIT(db);
2368 (void) dbuf_read(db, NULL, flags);
2369 (void) dbuf_dirty(db, tx);
2370 }
2371
2372 void
2373 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2374 {
2375 dmu_buf_will_dirty_impl(db_fake,
2376 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2377 }
2378
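/*
 * Return B_TRUE if the dbuf has a dirty record for the given transaction's
 * txg.
 */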
2379 boolean_t
2380 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2381 {
2382 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2383
2384 mutex_enter(&db->db_mtx);
2385 for (dbuf_dirty_record_t *dr = db->db_last_dirty;
2386 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
2387 if (dr->dr_txg == tx->tx_txg) {
2388 mutex_exit(&db->db_mtx);
2389 return (B_TRUE);
2390 }
2391 }
2392 mutex_exit(&db->db_mtx);
2393 return (B_FALSE);
2394 }
2395
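/*
 * Transition the dbuf to DB_NOFILL and dirty it via dmu_buf_will_fill();
 * the block's existing contents are never read.
 */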
2396 void
2397 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2398 {
2399 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2400
2401 db->db_state = DB_NOFILL;
2402
2403 dmu_buf_will_fill(db_fake, tx);
2404 }
2405
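/*
 * Prepare the dbuf to be entirely overwritten by the caller: allocate a
 * buffer without reading the old contents (dbuf_noread()) and dirty it in
 * this transaction. The fill is completed by dmu_buf_fill_done().
 */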
2406 void
2407 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2408 {
2409 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2410
2411 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2412 ASSERT(tx->tx_txg != 0);
2413 ASSERT(db->db_level == 0);
2414 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2415
2416 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2417 dmu_tx_private_ok(tx));
2418
2419 dbuf_noread(db);
2420 (void) dbuf_dirty(db, tx);
2421 }
2422
2423 /*
2424 * This function is effectively the same as dmu_buf_will_dirty(), but
2425 * indicates the caller expects raw encrypted data in the db, and provides
2426 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2427 * blkptr_t when this dbuf is written. This is only used for blocks of
2428 * dnodes, during raw receive.
2429 */
2430 void
2431 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2432 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2433 {
2434 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2435 dbuf_dirty_record_t *dr;
2436
2437 /*
2438 * dr_has_raw_params is only processed for blocks of dnodes
2439 * (see dbuf_sync_dnode_leaf_crypt()).
2440 */
2441 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2442 ASSERT3U(db->db_level, ==, 0);
2443 ASSERT(db->db_objset->os_raw_receive);
2444
2445 dmu_buf_will_dirty_impl(db_fake,
2446 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2447
2448 dr = db->db_last_dirty;
2449 while (dr != NULL && dr->dr_txg > tx->tx_txg)
2450 dr = dr->dr_next;
2451
2452 ASSERT3P(dr, !=, NULL);
2453 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2454
2455 dr->dt.dl.dr_has_raw_params = B_TRUE;
2456 dr->dt.dl.dr_byteorder = byteorder;
2457 bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2458 bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2459 bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2460 }
2461
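/*
 * Install the given block pointer as the override for the current dirty
 * record (DR_OVERRIDDEN), so that syncing context uses it instead of
 * writing out the dbuf's data.
 */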
2462 static void
2463 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2464 {
2465 struct dirty_leaf *dl;
2466
2467 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
2468 dl = &db->db_last_dirty->dt.dl;
2469 dl->dr_overridden_by = *bp;
2470 dl->dr_override_state = DR_OVERRIDDEN;
2471 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
2472 }
2473
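/*
 * Complete a fill started by dmu_buf_will_fill(): if the block was freed
 * while it was being filled, zero its contents, then move the dbuf to
 * DB_CACHED and wake any waiters.
 */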
2474 /* ARGSUSED */
2475 void
2476 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2477 {
2478 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2479 mutex_enter(&db->db_mtx);
2480 DBUF_VERIFY(db);
2481
2482 if (db->db_state == DB_FILL) {
2483 if (db->db_level == 0 && db->db_freed_in_flight) {
2484 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2485 /* we were freed while filling */
2486 /* XXX dbuf_undirty? */
2487 bzero(db->db.db_data, db->db.db_size);
2488 db->db_freed_in_flight = FALSE;
2489 }
2490 db->db_state = DB_CACHED;
2491 cv_broadcast(&db->db_changed);
2492 }
2493 mutex_exit(&db->db_mtx);
2494 }
2495
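/*
 * Write the provided data as an embedded block pointer: mark the dbuf
 * NOFILL and encode the data directly into the dirty record's override
 * bp, so no separate block is allocated for it on disk.
 */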
2496 void
2497 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2498 bp_embedded_type_t etype, enum zio_compress comp,
2499 int uncompressed_size, int compressed_size, int byteorder,
2500 dmu_tx_t *tx)
2501 {
2502 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2503 struct dirty_leaf *dl;
2504 dmu_object_type_t type;
2505
2506 if (etype == BP_EMBEDDED_TYPE_DATA) {
2507 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2508 SPA_FEATURE_EMBEDDED_DATA));
2509 }
2510
2511 DB_DNODE_ENTER(db);
2512 type = DB_DNODE(db)->dn_type;
2513 DB_DNODE_EXIT(db);
2514
2515 ASSERT0(db->db_level);
2516 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2517
2518 dmu_buf_will_not_fill(dbuf, tx);
2519
2520 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
2521 dl = &db->db_last_dirty->dt.dl;
2522 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2523 data, comp, uncompressed_size, compressed_size);
2524 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2525 BP_SET_TYPE(&dl->dr_overridden_by, type);
2526 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2527 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2528
2529 dl->dr_override_state = DR_OVERRIDDEN;
2530 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
2531 }
2532
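/*
 * Mark this block as redacted: the dbuf is set to NOFILL and an override
 * block pointer with BP_SET_REDACTED is installed, so no data is written
 * for the block. Requires the redacted_datasets feature to be active.
 */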
2533 void
2534 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2535 {
2536 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2537 dmu_object_type_t type;
2538 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2539 SPA_FEATURE_REDACTED_DATASETS));
2540
2541 DB_DNODE_ENTER(db);
2542 type = DB_DNODE(db)->dn_type;
2543 DB_DNODE_EXIT(db);
2544
2545 ASSERT0(db->db_level);
2546 dmu_buf_will_not_fill(dbuf, tx);
2547
2548 blkptr_t bp = { { { {0} } } };
2549 BP_SET_TYPE(&bp, type);
2550 BP_SET_LEVEL(&bp, 0);
2551 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2552 BP_SET_REDACTED(&bp);
2553 BPE_SET_LSIZE(&bp, dbuf->db_size);
2554
2555 dbuf_override_impl(db, &bp, tx);
2556 }
2557
2558 /*
2559 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2560 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2561 */
2562 void
2563 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2564 {
2565 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2566 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2567 ASSERT(db->db_level == 0);
2568 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2569 ASSERT(buf != NULL);
2570 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2571 ASSERT(tx->tx_txg != 0);
2572
2573 arc_return_buf(buf, db);
2574 ASSERT(arc_released(buf));
2575
2576 mutex_enter(&db->db_mtx);
2577
2578 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2579 cv_wait(&db->db_changed, &db->db_mtx);
2580
2581 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2582
2583 if (db->db_state == DB_CACHED &&
2584 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2585 /*
2586 * In practice, we will never have a case where we have an
2587 * encrypted arc buffer while additional holds exist on the
2588 * dbuf. We don't handle this here so we simply assert that
2589 * fact instead.
2590 */
2591 ASSERT(!arc_is_encrypted(buf));
2592 mutex_exit(&db->db_mtx);
2593 (void) dbuf_dirty(db, tx);
2594 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
2595 arc_buf_destroy(buf, db);
2596 xuio_stat_wbuf_copied();
2597 return;
2598 }
2599
2600 xuio_stat_wbuf_nocopy();
2601 if (db->db_state == DB_CACHED) {
2602 dbuf_dirty_record_t *dr = db->db_last_dirty;
2603
2604 ASSERT(db->db_buf != NULL);
2605 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2606 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2607
2608 if (!arc_released(db->db_buf)) {
2609 ASSERT(dr->dt.dl.dr_override_state ==
2610 DR_OVERRIDDEN);
2611 arc_release(db->db_buf, db);
2612 }
2613 dr->dt.dl.dr_data = buf;
2614 arc_buf_destroy(db->db_buf, db);
2615 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2616 arc_release(db->db_buf, db);
2617 arc_buf_destroy(db->db_buf, db);
2618 }
2619 db->db_buf = NULL;
2620 }
2621 ASSERT(db->db_buf == NULL);
2622 dbuf_set_data(db, buf);
2623 db->db_state = DB_FILL;
2624 mutex_exit(&db->db_mtx);
2625 (void) dbuf_dirty(db, tx);
2626 dmu_buf_fill_done(&db->db, tx);
2627 }
2628
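/*
 * Final teardown of a dbuf with no remaining holds: free its ARC buffer
 * (or bonus data), remove it from the dbuf cache and (for non-bonus
 * dbufs) the hash table and the dnode's dbuf tree, drop the associated
 * dnode hold, release the hold on its parent indirect dbuf (if any), and
 * free the structure.
 */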
2629 void
2630 dbuf_destroy(dmu_buf_impl_t *db)
2631 {
2632 dnode_t *dn;
2633 dmu_buf_impl_t *parent = db->db_parent;
2634 dmu_buf_impl_t *dndb;
2635
2636 ASSERT(MUTEX_HELD(&db->db_mtx));
2637 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2638
2639 if (db->db_buf != NULL) {
2640 arc_buf_destroy(db->db_buf, db);
2641 db->db_buf = NULL;
2642 }
2643
2644 if (db->db_blkid == DMU_BONUS_BLKID) {
2645 int slots = DB_DNODE(db)->dn_num_slots;
2646 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2647 if (db->db.db_data != NULL) {
2648 kmem_free(db->db.db_data, bonuslen);
2649 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2650 db->db_state = DB_UNCACHED;
2651 }
2652 }
2653
2654 dbuf_clear_data(db);
2655
2656 if (multilist_link_active(&db->db_cache_link)) {
2657 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2658 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2659
2660 multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
2661 (void) zfs_refcount_remove_many(
2662 &dbuf_caches[db->db_caching_status].size,
2663 db->db.db_size, db);
2664
2665 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2666 DBUF_STAT_BUMPDOWN(metadata_cache_count);
2667 } else {
2668 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2669 DBUF_STAT_BUMPDOWN(cache_count);
2670 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2671 db->db.db_size);
2672 }
2673 db->db_caching_status = DB_NO_CACHE;
2674 }
2675
2676 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2677 ASSERT(db->db_data_pending == NULL);
2678
2679 db->db_state = DB_EVICTING;
2680 db->db_blkptr = NULL;
2681
2682 /*
2683 * Now that db_state is DB_EVICTING, nobody else can find this via
2684 * the hash table. We can now drop db_mtx, which allows us to
2685 * acquire the dn_dbufs_mtx.
2686 */
2687 mutex_exit(&db->db_mtx);
2688
2689 DB_DNODE_ENTER(db);
2690 dn = DB_DNODE(db);
2691 dndb = dn->dn_dbuf;
2692 if (db->db_blkid != DMU_BONUS_BLKID) {
2693 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2694 if (needlock)
2695 mutex_enter_nested(&dn->dn_dbufs_mtx,
2696 NESTED_SINGLE);
2697 avl_remove(&dn->dn_dbufs, db);
2698 atomic_dec_32(&dn->dn_dbufs_count);
2699 membar_producer();
2700 DB_DNODE_EXIT(db);
2701 if (needlock)
2702 mutex_exit(&dn->dn_dbufs_mtx);
2703 /*
2704 * Decrementing the dbuf count means that the hold corresponding
2705 * to the removed dbuf is no longer discounted in dnode_move(),
2706 * so the dnode cannot be moved until after we release the hold.
2707 * The membar_producer() ensures visibility of the decremented
2708 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2709 * release any lock.
2710 */
2711 mutex_enter(&dn->dn_mtx);
2712 dnode_rele_and_unlock(dn, db, B_TRUE);
2713 db->db_dnode_handle = NULL;
2714
2715 dbuf_hash_remove(db);
2716 } else {
2717 DB_DNODE_EXIT(db);
2718 }
2719
2720 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2721
2722 db->db_parent = NULL;
2723
2724 ASSERT(db->db_buf == NULL);
2725 ASSERT(db->db.db_data == NULL);
2726 ASSERT(db->db_hash_next == NULL);
2727 ASSERT(db->db_blkptr == NULL);
2728 ASSERT(db->db_data_pending == NULL);
2729 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2730 ASSERT(!multilist_link_active(&db->db_cache_link));
2731
2732 kmem_cache_free(dbuf_kmem_cache, db);
2733 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2734
2735 /*
2736 * If this dbuf is referenced from an indirect dbuf,
2737 * decrement the ref count on the indirect dbuf.
2738 */
2739 if (parent && parent != dndb) {
2740 mutex_enter(&parent->db_mtx);
2741 dbuf_rele_and_unlock(parent, db, B_TRUE);
2742 }
2743 }
2744
2745 /*
2746 * Note: While bpp will always be updated if the function returns success,
2747 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2748 * this happens when the dnode is the meta-dnode, or {user|group|project}used
2749 * object.
2750 */
2751 __attribute__((always_inline))
2752 static inline int
2753 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2754 dmu_buf_impl_t **parentp, blkptr_t **bpp)
2755 {
2756 *parentp = NULL;
2757 *bpp = NULL;
2758
2759 ASSERT(blkid != DMU_BONUS_BLKID);
2760
2761 if (blkid == DMU_SPILL_BLKID) {
2762 mutex_enter(&dn->dn_mtx);
2763 if (dn->dn_have_spill &&
2764 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2765 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2766 else
2767 *bpp = NULL;
2768 dbuf_add_ref(dn->dn_dbuf, NULL);
2769 *parentp = dn->dn_dbuf;
2770 mutex_exit(&dn->dn_mtx);
2771 return (0);
2772 }
2773
2774 int nlevels =
2775 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
2776 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2777
2778 ASSERT3U(level * epbs, <, 64);
2779 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2780 /*
2781 * This assertion shouldn't trip as long as the max indirect block size
2782 * is less than 1M. The reason for this is that up to that point,
2783 * the number of levels required to address an entire object with blocks
2784 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
2785 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
2786 * (i.e. we can address the entire object), objects will all use at most
2787 * N-1 levels and the assertion won't overflow. However, once epbs is
2788 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
2789 * enough to address an entire object, so objects will have 5 levels,
2790 * but then this assertion will overflow.
2791 *
2792 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2793 * need to redo this logic to handle overflows.
2794 */
2795 ASSERT(level >= nlevels ||
2796 ((nlevels - level - 1) * epbs) +
2797 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
2798 if (level >= nlevels ||
2799 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2800 ((nlevels - level - 1) * epbs)) ||
2801 (fail_sparse &&
2802 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
2803 /* the buffer has no parent yet */
2804 return (SET_ERROR(ENOENT));
2805 } else if (level < nlevels-1) {
2806 /* this block is referenced from an indirect block */
2807 int err;
2808 dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level + 1,
2809 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2810 err = dbuf_hold_impl_arg(dh);
2811 dbuf_hold_arg_destroy(dh);
2812 if (err)
2813 return (err);
2814 err = dbuf_read(*parentp, NULL,
2815 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2816 if (err) {
2817 dbuf_rele(*parentp, NULL);
2818 *parentp = NULL;
2819 return (err);
2820 }
2821 rw_enter(&(*parentp)->db_rwlock, RW_READER);
2822 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2823 (blkid & ((1ULL << epbs) - 1));
2824 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2825 ASSERT(BP_IS_HOLE(*bpp));
2826 rw_exit(&(*parentp)->db_rwlock);
2827 return (0);
2828 } else {
2829 /* the block is referenced from the dnode */
2830 ASSERT3U(level, ==, nlevels-1);
2831 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2832 blkid < dn->dn_phys->dn_nblkptr);
2833 if (dn->dn_dbuf) {
2834 dbuf_add_ref(dn->dn_dbuf, NULL);
2835 *parentp = dn->dn_dbuf;
2836 }
2837 *bpp = &dn->dn_phys->dn_blkptr[blkid];
2838 return (0);
2839 }
2840 }
2841
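/*
 * Allocate and initialize a new dbuf for (dn, level, blkid) and, except
 * for the bonus buffer, insert it into the dbuf hash table and the
 * dnode's dn_dbufs tree. The dbuf is returned in DB_UNCACHED state. If
 * another thread raced us and inserted an equivalent dbuf first, free
 * ours and return the existing one.
 */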
2842 static dmu_buf_impl_t *
2843 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2844 dmu_buf_impl_t *parent, blkptr_t *blkptr)
2845 {
2846 objset_t *os = dn->dn_objset;
2847 dmu_buf_impl_t *db, *odb;
2848
2849 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2850 ASSERT(dn->dn_type != DMU_OT_NONE);
2851
2852 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
2853
2854 db->db_objset = os;
2855 db->db.db_object = dn->dn_object;
2856 db->db_level = level;
2857 db->db_blkid = blkid;
2858 db->db_last_dirty = NULL;
2859 db->db_dirtycnt = 0;
2860 db->db_dnode_handle = dn->dn_handle;
2861 db->db_parent = parent;
2862 db->db_blkptr = blkptr;
2863
2864 db->db_user = NULL;
2865 db->db_user_immediate_evict = FALSE;
2866 db->db_freed_in_flight = FALSE;
2867 db->db_pending_evict = FALSE;
2868
2869 if (blkid == DMU_BONUS_BLKID) {
2870 ASSERT3P(parent, ==, dn->dn_dbuf);
2871 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
2872 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
2873 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
2874 db->db.db_offset = DMU_BONUS_BLKID;
2875 db->db_state = DB_UNCACHED;
2876 db->db_caching_status = DB_NO_CACHE;
2877 /* the bonus dbuf is not placed in the hash table */
2878 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2879 return (db);
2880 } else if (blkid == DMU_SPILL_BLKID) {
2881 db->db.db_size = (blkptr != NULL) ?
2882 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
2883 db->db.db_offset = 0;
2884 } else {
2885 int blocksize =
2886 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
2887 db->db.db_size = blocksize;
2888 db->db.db_offset = db->db_blkid * blocksize;
2889 }
2890
2891 /*
2892 * Hold the dn_dbufs_mtx while the new dbuf is inserted into
2893 * the hash table *and* added to the dn_dbufs list.
2894 * This prevents a possible deadlock with someone
2895 * trying to look up this dbuf before it's added to the
2896 * dn_dbufs list.
2897 */
2898 mutex_enter(&dn->dn_dbufs_mtx);
2899 db->db_state = DB_EVICTING;
2900 if ((odb = dbuf_hash_insert(db)) != NULL) {
2901 /* someone else inserted it first */
2902 kmem_cache_free(dbuf_kmem_cache, db);
2903 mutex_exit(&dn->dn_dbufs_mtx);
2904 DBUF_STAT_BUMP(hash_insert_race);
2905 return (odb);
2906 }
2907 avl_add(&dn->dn_dbufs, db);
2908
2909 db->db_state = DB_UNCACHED;
2910 db->db_caching_status = DB_NO_CACHE;
2911 mutex_exit(&dn->dn_dbufs_mtx);
2912 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2913
2914 if (parent && parent != dn->dn_dbuf)
2915 dbuf_add_ref(parent, db);
2916
2917 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2918 zfs_refcount_count(&dn->dn_holds) > 0);
2919 (void) zfs_refcount_add(&dn->dn_holds, db);
2920 atomic_inc_32(&dn->dn_dbufs_count);
2921
2922 dprintf_dbuf(db, "db=%p\n", db);
2923
2924 return (db);
2925 }
2926
2927 /*
2928 * This function returns a block pointer and information about the object,
2929 * given a dnode and a block. This is a publicly accessible version of
2930 * dbuf_findbp that only returns some information, rather than the
2931 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
2932 * should be locked as (at least) a reader.
2933 */
2934 int
2935 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
2936 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
2937 {
2938 dmu_buf_impl_t *dbp = NULL;
2939 blkptr_t *bp2;
2940 int err = 0;
2941 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2942
2943 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
2944 if (err == 0) {
2945 *bp = *bp2;
2946 if (dbp != NULL)
2947 dbuf_rele(dbp, NULL);
2948 if (datablkszsec != NULL)
2949 *datablkszsec = dn->dn_phys->dn_datablkszsec;
2950 if (indblkshift != NULL)
2951 *indblkshift = dn->dn_phys->dn_indblkshift;
2952 }
2953
2954 return (err);
2955 }
2956
2957 typedef struct dbuf_prefetch_arg {
2958 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
2959 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
2960 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
2961 int dpa_curlevel; /* The current level that we're reading */
2962 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
2963 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
2964 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
2965 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
2966 } dbuf_prefetch_arg_t;
2967
2968 /*
2969 * Actually issue the prefetch read for the block given.
2970 */
2971 static void
2972 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
2973 {
2974 ASSERT(!BP_IS_REDACTED(bp) ||
2975 dsl_dataset_feature_is_active(
2976 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
2977 SPA_FEATURE_REDACTED_DATASETS));
2978
2979 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
2980 return;
2981
2982 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2983 arc_flags_t aflags =
2984 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
2985
2986 /* dnodes are always read as raw and then converted later */
2987 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
2988 dpa->dpa_curlevel == 0)
2989 zio_flags |= ZIO_FLAG_RAW;
2990
2991 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2992 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
2993 ASSERT(dpa->dpa_zio != NULL);
2994 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
2995 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
2996 }
2997
2998 /*
2999 * Called when an indirect block above our prefetch target is read in. This
3000 * will either read in the next indirect block down the tree or issue the actual
3001 * prefetch if the next block down is our target.
3002 */
3003 static void
3004 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3005 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3006 {
3007 dbuf_prefetch_arg_t *dpa = private;
3008
3009 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3010 ASSERT3S(dpa->dpa_curlevel, >, 0);
3011
3012 if (abuf == NULL) {
3013 ASSERT(zio == NULL || zio->io_error != 0);
3014 kmem_free(dpa, sizeof (*dpa));
3015 return;
3016 }
3017 ASSERT(zio == NULL || zio->io_error == 0);
3018
3019 /*
3020 * The dpa_dnode is only valid if we are called with a NULL
3021 * zio. This indicates that the arc_read() returned without
3022 * first calling zio_read() to issue a physical read. Once
3023 * a physical read is made the dpa_dnode must be invalidated
3024 * as the locks guarding it may have been dropped. If the
3025 * dpa_dnode is still valid, then we want to add it to the dbuf
3026 * cache. To do so, we must hold the dbuf associated with the block
3027 * we just prefetched, read its contents so that we associate it
3028 * with an arc_buf_t, and then release it.
3029 */
3030 if (zio != NULL) {
3031 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3032 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3033 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3034 } else {
3035 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3036 }
3037 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3038
3039 dpa->dpa_dnode = NULL;
3040 } else if (dpa->dpa_dnode != NULL) {
3041 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3042 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3043 dpa->dpa_zb.zb_level));
3044 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3045 dpa->dpa_curlevel, curblkid, FTAG);
3046 if (db == NULL) {
3047 kmem_free(dpa, sizeof (*dpa));
3048 arc_buf_destroy(abuf, private);
3049 return;
3050 }
3051
3052 (void) dbuf_read(db, NULL,
3053 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3054 dbuf_rele(db, FTAG);
3055 }
3056
3057 dpa->dpa_curlevel--;
3058 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3059 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3060 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3061 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3062
3063 ASSERT(!BP_IS_REDACTED(bp) ||
3064 dsl_dataset_feature_is_active(
3065 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3066 SPA_FEATURE_REDACTED_DATASETS));
3067 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3068 kmem_free(dpa, sizeof (*dpa));
3069 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3070 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3071 dbuf_issue_final_prefetch(dpa, bp);
3072 kmem_free(dpa, sizeof (*dpa));
3073 } else {
3074 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3075 zbookmark_phys_t zb;
3076
3077 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3078 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3079 iter_aflags |= ARC_FLAG_L2CACHE;
3080
3081 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3082
3083 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3084 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3085
3086 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3087 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
3088 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3089 &iter_aflags, &zb);
3090 }
3091
3092 arc_buf_destroy(abuf, private);
3093 }
3094
3095 /*
3096 * Issue prefetch reads for the given block on the given level. If the indirect
3097 * blocks above that block are not in memory, we will read them in
3098 * asynchronously. As a result, this call never blocks waiting for a read to
3099 * complete. Note that the prefetch might fail if the dataset is encrypted and
3100 * the encryption key is unmapped before the IO completes.
3101 */
3102 void
3103 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3104 arc_flags_t aflags)
3105 {
3106 blkptr_t bp;
3107 int epbs, nlevels, curlevel;
3108 uint64_t curblkid;
3109
3110 ASSERT(blkid != DMU_BONUS_BLKID);
3111 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3112
3113 if (blkid > dn->dn_maxblkid)
3114 return;
3115
3116 if (level == 0 && dnode_block_freed(dn, blkid))
3117 return;
3118
3119 /*
3120 * This dnode hasn't been written to disk yet, so there's nothing to
3121 * prefetch.
3122 */
3123 nlevels = dn->dn_phys->dn_nlevels;
3124 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3125 return;
3126
3127 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3128 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3129 return;
3130
3131 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3132 level, blkid);
3133 if (db != NULL) {
3134 mutex_exit(&db->db_mtx);
3135 /*
3136 * This dbuf already exists. It is either CACHED, or
3137 * (we assume) about to be read or filled.
3138 */
3139 return;
3140 }
3141
3142 /*
3143 * Find the closest ancestor (indirect block) of the target block
3144 * that is present in the cache. In this indirect block, we will
3145 * find the bp that is at curlevel, curblkid.
3146 */
3147 curlevel = level;
3148 curblkid = blkid;
3149 while (curlevel < nlevels - 1) {
3150 int parent_level = curlevel + 1;
3151 uint64_t parent_blkid = curblkid >> epbs;
3152 dmu_buf_impl_t *db;
3153
3154 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3155 FALSE, TRUE, FTAG, &db) == 0) {
3156 blkptr_t *bpp = db->db_buf->b_data;
3157 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3158 dbuf_rele(db, FTAG);
3159 break;
3160 }
3161
3162 curlevel = parent_level;
3163 curblkid = parent_blkid;
3164 }
3165
3166 if (curlevel == nlevels - 1) {
3167 /* No cached indirect blocks found. */
3168 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3169 bp = dn->dn_phys->dn_blkptr[curblkid];
3170 }
3171 ASSERT(!BP_IS_REDACTED(&bp) ||
3172 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3173 SPA_FEATURE_REDACTED_DATASETS));
3174 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3175 return;
3176
3177 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3178
3179 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3180 ZIO_FLAG_CANFAIL);
3181
3182 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3183 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3184 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3185 dn->dn_object, level, blkid);
3186 dpa->dpa_curlevel = curlevel;
3187 dpa->dpa_prio = prio;
3188 dpa->dpa_aflags = aflags;
3189 dpa->dpa_spa = dn->dn_objset->os_spa;
3190 dpa->dpa_dnode = dn;
3191 dpa->dpa_epbs = epbs;
3192 dpa->dpa_zio = pio;
3193
3194 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3195 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3196 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3197
3198 /*
3199 * If we have the indirect just above us, no need to do the asynchronous
3200 * prefetch chain; we'll just run the last step ourselves. If we're at
3201 * a higher level, though, we want to issue the prefetches for all the
3202 * indirect blocks asynchronously, so we can go on with whatever we were
3203 * doing.
3204 */
3205 if (curlevel == level) {
3206 ASSERT3U(curblkid, ==, blkid);
3207 dbuf_issue_final_prefetch(dpa, &bp);
3208 kmem_free(dpa, sizeof (*dpa));
3209 } else {
3210 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3211 zbookmark_phys_t zb;
3212
3213 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3214 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3215 iter_aflags |= ARC_FLAG_L2CACHE;
3216
3217 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3218 dn->dn_object, curlevel, curblkid);
3219 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3220 &bp, dbuf_prefetch_indirect_done, dpa, prio,
3221 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3222 &iter_aflags, &zb);
3223 }
3224 /*
3225 * We use pio here instead of dpa_zio since it's possible that
3226 * dpa may have already been freed.
3227 */
3228 zio_nowait(pio);
3229 }
3230
3231 #define DBUF_HOLD_IMPL_MAX_DEPTH 20
3232
3233 /*
3234 * Helper function for dbuf_hold_impl_arg() to copy a buffer. Handles
3235 * the case of encrypted, compressed and uncompressed buffers by
3236 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3237 * arc_alloc_compressed_buf() or arc_alloc_buf().
3238 *
3239 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl_arg().
3240 */
3241 noinline static void
3242 dbuf_hold_copy(struct dbuf_hold_arg *dh)
3243 {
3244 dnode_t *dn = dh->dh_dn;
3245 dmu_buf_impl_t *db = dh->dh_db;
3246 dbuf_dirty_record_t *dr = dh->dh_dr;
3247 arc_buf_t *data = dr->dt.dl.dr_data;
3248
3249 enum zio_compress compress_type = arc_get_compression(data);
3250
3251 if (arc_is_encrypted(data)) {
3252 boolean_t byteorder;
3253 uint8_t salt[ZIO_DATA_SALT_LEN];
3254 uint8_t iv[ZIO_DATA_IV_LEN];
3255 uint8_t mac[ZIO_DATA_MAC_LEN];
3256
3257 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3258 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3259 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3260 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3261 compress_type));
3262 } else if (compress_type != ZIO_COMPRESS_OFF) {
3263 dbuf_set_data(db, arc_alloc_compressed_buf(
3264 dn->dn_objset->os_spa, db, arc_buf_size(data),
3265 arc_buf_lsize(data), compress_type));
3266 } else {
3267 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3268 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3269 }
3270
3271 rw_enter(&db->db_rwlock, RW_WRITER);
3272 bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
3273 rw_exit(&db->db_rwlock);
3274 }
3275
3276 /*
3277 * Returns with db_holds incremented, and db_mtx not held.
3278 * Note: dn_struct_rwlock must be held.
3279 */
3280 static int
3281 dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
3282 {
3283 dh->dh_parent = NULL;
3284
3285 ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
3286 ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
3287 ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
3288
3289 *(dh->dh_dbp) = NULL;
3290
3291 /* If the pool has been created, verify the tx_sync_lock is not held */
3292 spa_t *spa = dh->dh_dn->dn_objset->os_spa;
3293 dsl_pool_t *dp = spa->spa_dsl_pool;
3294 if (dp != NULL) {
3295 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3296 }
3297
3298 /* dbuf_find() returns with db_mtx held */
3299 dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object,
3300 dh->dh_level, dh->dh_blkid);
3301
3302 if (dh->dh_db == NULL) {
3303 dh->dh_bp = NULL;
3304
3305 if (dh->dh_fail_uncached)
3306 return (SET_ERROR(ENOENT));
3307
3308 ASSERT3P(dh->dh_parent, ==, NULL);
3309 dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
3310 dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp);
3311 if (dh->dh_fail_sparse) {
3312 if (dh->dh_err == 0 &&
3313 dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
3314 dh->dh_err = SET_ERROR(ENOENT);
3315 if (dh->dh_err) {
3316 if (dh->dh_parent)
3317 dbuf_rele(dh->dh_parent, NULL);
3318 return (dh->dh_err);
3319 }
3320 }
3321 if (dh->dh_err && dh->dh_err != ENOENT)
3322 return (dh->dh_err);
3323 dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
3324 dh->dh_parent, dh->dh_bp);
3325 }
3326
3327 if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
3328 mutex_exit(&dh->dh_db->db_mtx);
3329 return (SET_ERROR(ENOENT));
3330 }
3331
3332 if (dh->dh_db->db_buf != NULL) {
3333 arc_buf_access(dh->dh_db->db_buf);
3334 ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
3335 }
3336
3337 ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
3338
3339 /*
3340 * If this buffer is currently syncing out, and we are
3341 * still referencing it from db_data, we need to make a copy
3342 * of it in case we decide we want to dirty it again in this txg.
3343 */
3344 if (dh->dh_db->db_level == 0 &&
3345 dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
3346 dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
3347 dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
3348 dh->dh_dr = dh->dh_db->db_data_pending;
3349 if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf)
3350 dbuf_hold_copy(dh);
3351 }
3352
3353 if (multilist_link_active(&dh->dh_db->db_cache_link)) {
3354 ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
3355 ASSERT(dh->dh_db->db_caching_status == DB_DBUF_CACHE ||
3356 dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE);
3357
3358 multilist_remove(
3359 dbuf_caches[dh->dh_db->db_caching_status].cache,
3360 dh->dh_db);
3361 (void) zfs_refcount_remove_many(
3362 &dbuf_caches[dh->dh_db->db_caching_status].size,
3363 dh->dh_db->db.db_size, dh->dh_db);
3364
3365 if (dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3366 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3367 } else {
3368 DBUF_STAT_BUMPDOWN(cache_levels[dh->dh_db->db_level]);
3369 DBUF_STAT_BUMPDOWN(cache_count);
3370 DBUF_STAT_DECR(cache_levels_bytes[dh->dh_db->db_level],
3371 dh->dh_db->db.db_size);
3372 }
3373 dh->dh_db->db_caching_status = DB_NO_CACHE;
3374 }
3375 (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
3376 DBUF_VERIFY(dh->dh_db);
3377 mutex_exit(&dh->dh_db->db_mtx);
3378
3379 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3380 if (dh->dh_parent)
3381 dbuf_rele(dh->dh_parent, NULL);
3382
3383 ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
3384 ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
3385 ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
3386 *(dh->dh_dbp) = dh->dh_db;
3387
3388 return (0);
3389 }
3390
3391 /*
3392 * dbuf_hold_impl_arg() is called recursively, via dbuf_findbp(). There can
3393 * be as many recursive calls as there are levels of on-disk indirect blocks,
3394 * but typically only 0-2 recursive calls. To minimize the stack frame size,
3395 * the recursive function's arguments and "local variables" are allocated on
3396 * the heap as the dbuf_hold_arg_t.
3397 */
3398 int
3399 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3400 boolean_t fail_sparse, boolean_t fail_uncached,
3401 void *tag, dmu_buf_impl_t **dbp)
3402 {
3403 dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level, blkid,
3404 fail_sparse, fail_uncached, tag, dbp);
3405
3406 int error = dbuf_hold_impl_arg(dh);
3407
3408 dbuf_hold_arg_destroy(dh);
3409
3410 return (error);
3411 }
3412
3413 static dbuf_hold_arg_t *
3414 dbuf_hold_arg_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3415 boolean_t fail_sparse, boolean_t fail_uncached,
3416 void *tag, dmu_buf_impl_t **dbp)
3417 {
3418 dbuf_hold_arg_t *dh = kmem_alloc(sizeof (*dh), KM_SLEEP);
3419 dh->dh_dn = dn;
3420 dh->dh_level = level;
3421 dh->dh_blkid = blkid;
3422
3423 dh->dh_fail_sparse = fail_sparse;
3424 dh->dh_fail_uncached = fail_uncached;
3425
3426 dh->dh_tag = tag;
3427 dh->dh_dbp = dbp;
3428
3429 dh->dh_db = NULL;
3430 dh->dh_parent = NULL;
3431 dh->dh_bp = NULL;
3432 dh->dh_err = 0;
3433 dh->dh_dr = NULL;
3434
3435 return (dh);
3436 }
3437
3438 static void
3439 dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh)
3440 {
3441 kmem_free(dh, sizeof (*dh));
3442 }
3443
3444 dmu_buf_impl_t *
3445 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3446 {
3447 return (dbuf_hold_level(dn, 0, blkid, tag));
3448 }
3449
3450 dmu_buf_impl_t *
3451 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3452 {
3453 dmu_buf_impl_t *db;
3454 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3455 return (err ? NULL : db);
3456 }
3457
3458 void
3459 dbuf_create_bonus(dnode_t *dn)
3460 {
3461 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3462
3463 ASSERT(dn->dn_bonus == NULL);
3464 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3465 }
3466
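/*
 * Resize the dnode's spill block to blksz, rounded up to a multiple of
 * SPA_MINBLOCKSIZE; only valid on the spill dbuf.
 */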
3467 int
3468 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3469 {
3470 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3471
3472 if (db->db_blkid != DMU_SPILL_BLKID)
3473 return (SET_ERROR(ENOTSUP));
3474 if (blksz == 0)
3475 blksz = SPA_MINBLOCKSIZE;
3476 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3477 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3478
3479 dbuf_new_size(db, blksz, tx);
3480
3481 return (0);
3482 }
3483
3484 void
3485 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3486 {
3487 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3488 }
3489
3490 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3491 void
3492 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3493 {
3494 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3495 VERIFY3S(holds, >, 1);
3496 }
3497
3498 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3499 boolean_t
3500 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3501 void *tag)
3502 {
3503 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3504 dmu_buf_impl_t *found_db;
3505 boolean_t result = B_FALSE;
3506
3507 if (blkid == DMU_BONUS_BLKID)
3508 found_db = dbuf_find_bonus(os, obj);
3509 else
3510 found_db = dbuf_find(os, obj, 0, blkid);
3511
3512 if (found_db != NULL) {
3513 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3514 (void) zfs_refcount_add(&db->db_holds, tag);
3515 result = B_TRUE;
3516 }
3517 mutex_exit(&found_db->db_mtx);
3518 }
3519 return (result);
3520 }
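
/*
 * Illustrative sketch only (not compiled): dmu_buf_try_add_ref() lets a
 * caller that only remembers (objset, object, blkid) re-take a hold on a
 * dbuf it used to hold, but only if that same dbuf is still the one in
 * the dbuf hash table.  FTAG is used as the hold tag purely for
 * illustration.
 */
#if 0
static boolean_t
dmu_buf_try_add_ref_example(dmu_buf_t *db, objset_t *os, uint64_t object,
    uint64_t blkid)
{
	if (!dmu_buf_try_add_ref(db, os, object, blkid, FTAG))
		return (B_FALSE);

	/* ... db is guaranteed to stay around until the matching rele ... */

	dmu_buf_rele(db, FTAG);
	return (B_TRUE);
}
#endif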
3521
3522 /*
3523 * If you call dbuf_rele() you had better not be referencing the dnode handle
3524 * unless you have some other direct or indirect hold on the dnode. (An indirect
3525 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3526 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3527 * dnode's parent dbuf evicting its dnode handles.
3528 */
3529 void
3530 dbuf_rele(dmu_buf_impl_t *db, void *tag)
3531 {
3532 mutex_enter(&db->db_mtx);
3533 dbuf_rele_and_unlock(db, tag, B_FALSE);
3534 }
3535
3536 void
3537 dmu_buf_rele(dmu_buf_t *db, void *tag)
3538 {
3539 dbuf_rele((dmu_buf_impl_t *)db, tag);
3540 }
3541
3542 /*
3543 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3544 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3545 * argument should be set if we are already in the dbuf-evicting code
3546 * path, in which case we don't want to recursively evict. This allows us to
3547 * avoid deeply nested stacks that would have a call flow similar to this:
3548 *
3549 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3550 * ^ |
3551 * | |
3552 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3553 *
3554 */
3555 void
3556 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
3557 {
3558 int64_t holds;
3559
3560 ASSERT(MUTEX_HELD(&db->db_mtx));
3561 DBUF_VERIFY(db);
3562
3563 /*
3564 * Remove the reference to the dbuf before removing its hold on the
3565 * dnode so we can guarantee in dnode_move() that a referenced bonus
3566 * buffer has a corresponding dnode hold.
3567 */
3568 holds = zfs_refcount_remove(&db->db_holds, tag);
3569 ASSERT(holds >= 0);
3570
3571 /*
3572 * We can't freeze indirects if there is a possibility that they
3573 * may be modified in the current syncing context.
3574 */
3575 if (db->db_buf != NULL &&
3576 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3577 arc_buf_freeze(db->db_buf);
3578 }
3579
3580 if (holds == db->db_dirtycnt &&
3581 db->db_level == 0 && db->db_user_immediate_evict)
3582 dbuf_evict_user(db);
3583
3584 if (holds == 0) {
3585 if (db->db_blkid == DMU_BONUS_BLKID) {
3586 dnode_t *dn;
3587 boolean_t evict_dbuf = db->db_pending_evict;
3588
3589 /*
3590 * If the dnode moves here, we cannot cross this
3591 * barrier until the move completes.
3592 */
3593 DB_DNODE_ENTER(db);
3594
3595 dn = DB_DNODE(db);
3596 atomic_dec_32(&dn->dn_dbufs_count);
3597
3598 /*
3599 * Decrementing the dbuf count means that the bonus
3600 * buffer's dnode hold is no longer discounted in
3601 * dnode_move(). The dnode cannot move until after
3602 * the dnode_rele() below.
3603 */
3604 DB_DNODE_EXIT(db);
3605
3606 /*
3607 * Do not reference db after its lock is dropped.
3608 * Another thread may evict it.
3609 */
3610 mutex_exit(&db->db_mtx);
3611
3612 if (evict_dbuf)
3613 dnode_evict_bonus(dn);
3614
3615 dnode_rele(dn, db);
3616 } else if (db->db_buf == NULL) {
3617 /*
3618 * This is a special case: we never associated this
3619 * dbuf with any data allocated from the ARC.
3620 */
3621 ASSERT(db->db_state == DB_UNCACHED ||
3622 db->db_state == DB_NOFILL);
3623 dbuf_destroy(db);
3624 } else if (arc_released(db->db_buf)) {
3625 /*
3626 * This dbuf has anonymous data associated with it.
3627 */
3628 dbuf_destroy(db);
3629 } else {
3630 boolean_t do_arc_evict = B_FALSE;
3631 blkptr_t bp;
3632 spa_t *spa = dmu_objset_spa(db->db_objset);
3633
3634 if (!DBUF_IS_CACHEABLE(db) &&
3635 db->db_blkptr != NULL &&
3636 !BP_IS_HOLE(db->db_blkptr) &&
3637 !BP_IS_EMBEDDED(db->db_blkptr)) {
3638 do_arc_evict = B_TRUE;
3639 bp = *db->db_blkptr;
3640 }
3641
3642 if (!DBUF_IS_CACHEABLE(db) ||
3643 db->db_pending_evict) {
3644 dbuf_destroy(db);
3645 } else if (!multilist_link_active(&db->db_cache_link)) {
3646 ASSERT3U(db->db_caching_status, ==,
3647 DB_NO_CACHE);
3648
3649 dbuf_cached_state_t dcs =
3650 dbuf_include_in_metadata_cache(db) ?
3651 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3652 db->db_caching_status = dcs;
3653
3654 multilist_insert(dbuf_caches[dcs].cache, db);
3655 (void) zfs_refcount_add_many(
3656 &dbuf_caches[dcs].size,
3657 db->db.db_size, db);
3658
3659 if (dcs == DB_DBUF_METADATA_CACHE) {
3660 DBUF_STAT_BUMP(metadata_cache_count);
3661 DBUF_STAT_MAX(
3662 metadata_cache_size_bytes_max,
3663 zfs_refcount_count(
3664 &dbuf_caches[dcs].size));
3665 } else {
3666 DBUF_STAT_BUMP(
3667 cache_levels[db->db_level]);
3668 DBUF_STAT_BUMP(cache_count);
3669 DBUF_STAT_INCR(
3670 cache_levels_bytes[db->db_level],
3671 db->db.db_size);
3672 DBUF_STAT_MAX(cache_size_bytes_max,
3673 zfs_refcount_count(
3674 &dbuf_caches[dcs].size));
3675 }
3676 mutex_exit(&db->db_mtx);
3677
3678 if (db->db_caching_status == DB_DBUF_CACHE &&
3679 !evicting) {
3680 dbuf_evict_notify();
3681 }
3682 }
3683
3684 if (do_arc_evict)
3685 arc_freed(spa, &bp);
3686 }
3687 } else {
3688 mutex_exit(&db->db_mtx);
3689 }
3690
3691 }
3692
3693 #pragma weak dmu_buf_refcount = dbuf_refcount
3694 uint64_t
3695 dbuf_refcount(dmu_buf_impl_t *db)
3696 {
3697 return (zfs_refcount_count(&db->db_holds));
3698 }
3699
3700 uint64_t
3701 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3702 {
3703 uint64_t holds;
3704 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3705
3706 mutex_enter(&db->db_mtx);
3707 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3708 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3709 mutex_exit(&db->db_mtx);
3710
3711 return (holds);
3712 }
3713
3714 void *
3715 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3716 dmu_buf_user_t *new_user)
3717 {
3718 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3719
3720 mutex_enter(&db->db_mtx);
3721 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3722 if (db->db_user == old_user)
3723 db->db_user = new_user;
3724 else
3725 old_user = db->db_user;
3726 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3727 mutex_exit(&db->db_mtx);
3728
3729 return (old_user);
3730 }
3731
3732 void *
3733 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3734 {
3735 return (dmu_buf_replace_user(db_fake, NULL, user));
3736 }
3737
3738 void *
3739 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3740 {
3741 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3742
3743 db->db_user_immediate_evict = TRUE;
3744 return (dmu_buf_set_user(db_fake, user));
3745 }
3746
3747 void *
3748 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3749 {
3750 return (dmu_buf_replace_user(db_fake, user, NULL));
3751 }
3752
3753 void *
3754 dmu_buf_get_user(dmu_buf_t *db_fake)
3755 {
3756 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3757
3758 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3759 return (db->db_user);
3760 }
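
/*
 * Illustrative sketch only (not compiled): the usual pattern for attaching
 * per-consumer state to a buffer with the dbuf user interface.  A consumer
 * embeds a dmu_buf_user_t in its own structure, initializes it (this sketch
 * assumes the dmu_buf_init_user() initializer declared in sys/dmu.h), and
 * then attaches it with dmu_buf_set_user(), which returns NULL only if no
 * user was previously attached.
 */
#if 0
typedef struct example_user {
	dmu_buf_user_t	eu_dbu;		/* must be embedded */
	uint64_t	eu_object;
} example_user_t;

static void
example_user_evict(void *arg)
{
	example_user_t *eu = arg;

	kmem_free(eu, sizeof (*eu));
}

static example_user_t *
example_user_attach(dmu_buf_t *db, uint64_t object)
{
	example_user_t *eu = kmem_zalloc(sizeof (*eu), KM_SLEEP);
	example_user_t *winner;

	eu->eu_object = object;
	dmu_buf_init_user(&eu->eu_dbu, example_user_evict, NULL, NULL);
	winner = dmu_buf_set_user(db, &eu->eu_dbu);
	if (winner != NULL) {
		/* Lost the race; another user is already attached. */
		kmem_free(eu, sizeof (*eu));
		return (winner);
	}
	return (eu);
}
#endif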
3761
3762 void
3763 dmu_buf_user_evict_wait()
3764 {
3765 taskq_wait(dbu_evict_taskq);
3766 }
3767
3768 blkptr_t *
3769 dmu_buf_get_blkptr(dmu_buf_t *db)
3770 {
3771 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3772 return (dbi->db_blkptr);
3773 }
3774
3775 objset_t *
3776 dmu_buf_get_objset(dmu_buf_t *db)
3777 {
3778 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3779 return (dbi->db_objset);
3780 }
3781
3782 dnode_t *
3783 dmu_buf_dnode_enter(dmu_buf_t *db)
3784 {
3785 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3786 DB_DNODE_ENTER(dbi);
3787 return (DB_DNODE(dbi));
3788 }
3789
3790 void
3791 dmu_buf_dnode_exit(dmu_buf_t *db)
3792 {
3793 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3794 DB_DNODE_EXIT(dbi);
3795 }
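
/*
 * Illustrative sketch only (not compiled): the dnode returned by
 * dmu_buf_dnode_enter() is only guaranteed to remain valid until the
 * matching dmu_buf_dnode_exit(), so copy out anything needed before
 * exiting.
 */
#if 0
static uint64_t
dmu_buf_dnode_example(dmu_buf_t *db)
{
	dnode_t *dn = dmu_buf_dnode_enter(db);
	uint64_t object = dn->dn_object;

	dmu_buf_dnode_exit(db);
	return (object);
}
#endif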
3796
3797 static void
3798 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3799 {
3800 /* ASSERT(dmu_tx_is_syncing(tx)) */
3801 ASSERT(MUTEX_HELD(&db->db_mtx));
3802
3803 if (db->db_blkptr != NULL)
3804 return;
3805
3806 if (db->db_blkid == DMU_SPILL_BLKID) {
3807 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
3808 BP_ZERO(db->db_blkptr);
3809 return;
3810 }
3811 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3812 /*
3813 * This buffer was allocated at a time when there were
3814 * no blkptrs available in the dnode, or it was
3815 * inappropriate to hook it in (i.e., nlevels mismatch).
3816 */
3817 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3818 ASSERT(db->db_parent == NULL);
3819 db->db_parent = dn->dn_dbuf;
3820 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3821 DBUF_VERIFY(db);
3822 } else {
3823 dmu_buf_impl_t *parent = db->db_parent;
3824 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3825
3826 ASSERT(dn->dn_phys->dn_nlevels > 1);
3827 if (parent == NULL) {
3828 mutex_exit(&db->db_mtx);
3829 rw_enter(&dn->dn_struct_rwlock, RW_READER);
3830 parent = dbuf_hold_level(dn, db->db_level + 1,
3831 db->db_blkid >> epbs, db);
3832 rw_exit(&dn->dn_struct_rwlock);
3833 mutex_enter(&db->db_mtx);
3834 db->db_parent = parent;
3835 }
3836 db->db_blkptr = (blkptr_t *)parent->db.db_data +
3837 (db->db_blkid & ((1ULL << epbs) - 1));
3838 DBUF_VERIFY(db);
3839 }
3840 }
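
/*
 * A worked example of the arithmetic above: with the common 128K indirect
 * block size (dn_indblkshift == 17) and 128-byte block pointers
 * (SPA_BLKPTRSHIFT == 7), epbs is 10, so each indirect block holds 1024
 * block pointers.  A level-0 buffer with db_blkid == 3000 then hangs off
 * level-1 block 3000 >> 10 == 2, at index 3000 & 1023 == 952 within that
 * indirect block.
 */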
3841
3842 /*
3843 * When syncing out a block of dnodes, adjust the block to deal with
3844 * encryption. Normally, we make sure the block is decrypted before writing
3845 * it. If we have crypt params, then we are writing a raw (encrypted) block
3846 * from a raw receive. In this case, set the ARC buf's crypt params so
3847 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
3848 */
3849 static void
3850 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
3851 {
3852 int err;
3853 dmu_buf_impl_t *db = dr->dr_dbuf;
3854
3855 ASSERT(MUTEX_HELD(&db->db_mtx));
3856 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
3857 ASSERT3U(db->db_level, ==, 0);
3858
3859 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
3860 zbookmark_phys_t zb;
3861
3862 /*
3863 * Unfortunately, there is currently no mechanism for
3864 * syncing context to handle decryption errors. An error
3865 * here is only possible if an attacker maliciously
3866 * changed a dnode block and updated the associated
3867 * checksums going up the block tree.
3868 */
3869 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
3870 db->db.db_object, db->db_level, db->db_blkid);
3871 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
3872 &zb, B_TRUE);
3873 if (err)
3874 panic("Invalid dnode block MAC");
3875 } else if (dr->dt.dl.dr_has_raw_params) {
3876 (void) arc_release(dr->dt.dl.dr_data, db);
3877 arc_convert_to_raw(dr->dt.dl.dr_data,
3878 dmu_objset_id(db->db_objset),
3879 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
3880 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
3881 }
3882 }
3883
3884 /*
3885 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
3886 * is critical that we not allow the compiler to inline this function into
3887 * dbuf_sync_list(), thereby drastically bloating the stack usage.
3888 */
3889 noinline static void
3890 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3891 {
3892 dmu_buf_impl_t *db = dr->dr_dbuf;
3893 dnode_t *dn;
3894 zio_t *zio;
3895
3896 ASSERT(dmu_tx_is_syncing(tx));
3897
3898 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
3899
3900 mutex_enter(&db->db_mtx);
3901
3902 ASSERT(db->db_level > 0);
3903 DBUF_VERIFY(db);
3904
3905 /* Read the block if it hasn't been read yet. */
3906 if (db->db_buf == NULL) {
3907 mutex_exit(&db->db_mtx);
3908 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
3909 mutex_enter(&db->db_mtx);
3910 }
3911 ASSERT3U(db->db_state, ==, DB_CACHED);
3912 ASSERT(db->db_buf != NULL);
3913
3914 DB_DNODE_ENTER(db);
3915 dn = DB_DNODE(db);
3916 /* Indirect block size must match what the dnode thinks it is. */
3917 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
3918 dbuf_check_blkptr(dn, db);
3919 DB_DNODE_EXIT(db);
3920
3921 /* Provide the pending dirty record to child dbufs */
3922 db->db_data_pending = dr;
3923
3924 mutex_exit(&db->db_mtx);
3925
3926 dbuf_write(dr, db->db_buf, tx);
3927
3928 zio = dr->dr_zio;
3929 mutex_enter(&dr->dt.di.dr_mtx);
3930 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
3931 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3932 mutex_exit(&dr->dt.di.dr_mtx);
3933 zio_nowait(zio);
3934 }
3935
3936 #ifdef ZFS_DEBUG
3937 /*
3938 * Verify that the size of the data in our bonus buffer does not exceed
3939 * its recorded size.
3940 *
3941 * The purpose of this verification is to catch any cases in development
3942 * where the size of a phys structure (e.g. space_map_phys_t) grows and,
3943 * due to incorrect feature management, older pools expect to read more
3944 * data even though they didn't actually write it to begin with.
3945 *
3946 * For example, this would catch an error in the feature logic where we
3947 * open an older pool and expect to write the space map histogram of
3948 * a space map with size SPACE_MAP_SIZE_V0.
3949 */
3950 static void
3951 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
3952 {
3953 dnode_t *dn = DB_DNODE(dr->dr_dbuf);
3954
3955 /*
3956 * Encrypted bonus buffers can have data past their bonuslen.
3957 * Skip the verification of these blocks.
3958 */
3959 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
3960 return;
3961
3962 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
3963 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
3964 ASSERT3U(bonuslen, <=, maxbonuslen);
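/*
 * e.g. a dnode occupying a single 512-byte slot allows at most the
 * traditional 320 bytes of bonus data, while multi-slot (large) dnodes
 * allow proportionally more.
 */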
3965
3966 arc_buf_t *datap = dr->dt.dl.dr_data;
3967 char *datap_end = ((char *)datap) + bonuslen;
3968 char *datap_max = ((char *)datap) + maxbonuslen;
3969
3970 /* ensure that everything is zero after our data */
3971 for (; datap_end < datap_max; datap_end++)
3972 ASSERT(*datap_end == 0);
3973 }
3974 #endif
3975
3976 /*
3977 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
3978 * critical that we not allow the compiler to inline this function into
3979 * dbuf_sync_list(), thereby drastically bloating the stack usage.
3980 */
3981 noinline static void
3982 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3983 {
3984 arc_buf_t **datap = &dr->dt.dl.dr_data;
3985 dmu_buf_impl_t *db = dr->dr_dbuf;
3986 dnode_t *dn;
3987 objset_t *os;
3988 uint64_t txg = tx->tx_txg;
3989
3990 ASSERT(dmu_tx_is_syncing(tx));
3991
3992 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
3993
3994 mutex_enter(&db->db_mtx);
3995 /*
3996 * To be synced, we must be dirtied. But we
3997 * might have been freed after the dirty.
3998 */
3999 if (db->db_state == DB_UNCACHED) {
4000 /* This buffer has been freed since it was dirtied */
4001 ASSERT(db->db.db_data == NULL);
4002 } else if (db->db_state == DB_FILL) {
4003 /* This buffer was freed and is now being re-filled */
4004 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4005 } else {
4006 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4007 }
4008 DBUF_VERIFY(db);
4009
4010 DB_DNODE_ENTER(db);
4011 dn = DB_DNODE(db);
4012
4013 if (db->db_blkid == DMU_SPILL_BLKID) {
4014 mutex_enter(&dn->dn_mtx);
4015 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4016 /*
4017 * In the previous transaction group, the bonus buffer
4018 * was entirely used to store the attributes for the
4019 * dnode which overrode the dn_spill field. However,
4020 * when adding more attributes to the file a spill
4021 * block was required to hold the extra attributes.
4022 *
4023 * Make sure to clear the garbage left in the dn_spill
4024 * field from the previous attributes in the bonus
4025 * buffer. Otherwise, after writing out the spill
4026 * block to the new allocated dva, it will free
4027 * the old block pointed to by the invalid dn_spill.
4028 */
4029 db->db_blkptr = NULL;
4030 }
4031 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4032 mutex_exit(&dn->dn_mtx);
4033 }
4034
4035 /*
4036 * If this is a bonus buffer, simply copy the bonus data into the
4037 * dnode. It will be written out when the dnode is synced (and it
4038 * will be synced, since it must have been dirty for dbuf_sync to
4039 * be called).
4040 */
4041 if (db->db_blkid == DMU_BONUS_BLKID) {
4042 dbuf_dirty_record_t **drp;
4043
4044 ASSERT(*datap != NULL);
4045 ASSERT0(db->db_level);
4046 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4047 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4048 bcopy(*datap, DN_BONUS(dn->dn_phys),
4049 DN_MAX_BONUS_LEN(dn->dn_phys));
4050 DB_DNODE_EXIT(db);
4051
4052 #ifdef ZFS_DEBUG
4053 dbuf_sync_leaf_verify_bonus_dnode(dr);
4054 #endif
4055
4056 if (*datap != db->db.db_data) {
4057 int slots = DB_DNODE(db)->dn_num_slots;
4058 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
4059 kmem_free(*datap, bonuslen);
4060 arc_space_return(bonuslen, ARC_SPACE_BONUS);
4061 }
4062 db->db_data_pending = NULL;
4063 drp = &db->db_last_dirty;
4064 while (*drp != dr)
4065 drp = &(*drp)->dr_next;
4066 ASSERT(dr->dr_next == NULL);
4067 ASSERT(dr->dr_dbuf == db);
4068 *drp = dr->dr_next;
4069 if (dr->dr_dbuf->db_level != 0) {
4070 mutex_destroy(&dr->dt.di.dr_mtx);
4071 list_destroy(&dr->dt.di.dr_children);
4072 }
4073 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4074 ASSERT(db->db_dirtycnt > 0);
4075 db->db_dirtycnt -= 1;
4076 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
4077 return;
4078 }
4079
4080 os = dn->dn_objset;
4081
4082 /*
4083 * dbuf_check_blkptr() below may drop the db_mtx lock, allowing a
4084 * dmu_sync operation to sneak in. As a result, we need to ensure that
4085 * we don't check the dr_override_state until we have returned from
4086 * dbuf_check_blkptr().
4087 */
4088 dbuf_check_blkptr(dn, db);
4089
4090 /*
4091 * If this buffer is in the middle of an immediate write,
4092 * wait for the synchronous IO to complete.
4093 */
4094 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4095 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4096 cv_wait(&db->db_changed, &db->db_mtx);
4097 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4098 }
4099
4100 /*
4101 * If this is a dnode block, ensure it is appropriately encrypted
4102 * or decrypted, depending on what we are writing to it this txg.
4103 */
4104 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4105 dbuf_prepare_encrypted_dnode_leaf(dr);
4106
4107 if (db->db_state != DB_NOFILL &&
4108 dn->dn_object != DMU_META_DNODE_OBJECT &&
4109 zfs_refcount_count(&db->db_holds) > 1 &&
4110 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4111 *datap == db->db_buf) {
4112 /*
4113 * If this buffer is currently "in use" (i.e., there
4114 * are active holds and db_data still references it),
4115 * then make a copy before we start the write so that
4116 * any modifications from the open txg will not leak
4117 * into this write.
4118 *
4119 * NOTE: this copy does not need to be made for
4120 * objects only modified in the syncing context (e.g.
4121 * DMU_OT_DNODE blocks).
4122 */
4123 int psize = arc_buf_size(*datap);
4124 int lsize = arc_buf_lsize(*datap);
4125 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4126 enum zio_compress compress_type = arc_get_compression(*datap);
4127
4128 if (arc_is_encrypted(*datap)) {
4129 boolean_t byteorder;
4130 uint8_t salt[ZIO_DATA_SALT_LEN];
4131 uint8_t iv[ZIO_DATA_IV_LEN];
4132 uint8_t mac[ZIO_DATA_MAC_LEN];
4133
4134 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4135 *datap = arc_alloc_raw_buf(os->os_spa, db,
4136 dmu_objset_id(os), byteorder, salt, iv, mac,
4137 dn->dn_type, psize, lsize, compress_type);
4138 } else if (compress_type != ZIO_COMPRESS_OFF) {
4139 ASSERT3U(type, ==, ARC_BUFC_DATA);
4140 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4141 psize, lsize, compress_type);
4142 } else {
4143 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4144 }
4145 bcopy(db->db.db_data, (*datap)->b_data, psize);
4146 }
4147 db->db_data_pending = dr;
4148
4149 mutex_exit(&db->db_mtx);
4150
4151 dbuf_write(dr, *datap, tx);
4152
4153 ASSERT(!list_link_active(&dr->dr_dirty_node));
4154 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4155 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4156 DB_DNODE_EXIT(db);
4157 } else {
4158 /*
4159 * Although zio_nowait() does not "wait for an IO", it does
4160 * initiate the IO. If this is an empty write it seems plausible
4161 * that the IO could actually be completed before the nowait
4162 * returns. We need to DB_DNODE_EXIT() first in case
4163 * zio_nowait() invalidates the dbuf.
4164 */
4165 DB_DNODE_EXIT(db);
4166 zio_nowait(dr->dr_zio);
4167 }
4168 }
4169
4170 void
4171 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4172 {
4173 dbuf_dirty_record_t *dr;
4174
4175 while ((dr = list_head(list))) {
4176 if (dr->dr_zio != NULL) {
4177 /*
4178 * If we find an already initialized zio then we
4179 * are processing the meta-dnode, and we have finished.
4180 * The dbufs for all dnodes are put back on the list
4181 * during processing, so that we can zio_wait()
4182 * these IOs after initiating all child IOs.
4183 */
4184 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4185 DMU_META_DNODE_OBJECT);
4186 break;
4187 }
4188 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4189 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4190 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4191 }
4192 list_remove(list, dr);
4193 if (dr->dr_dbuf->db_level > 0)
4194 dbuf_sync_indirect(dr, tx);
4195 else
4196 dbuf_sync_leaf(dr, tx);
4197 }
4198 }
4199
4200 /* ARGSUSED */
4201 static void
4202 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4203 {
4204 dmu_buf_impl_t *db = vdb;
4205 dnode_t *dn;
4206 blkptr_t *bp = zio->io_bp;
4207 blkptr_t *bp_orig = &zio->io_bp_orig;
4208 spa_t *spa = zio->io_spa;
4209 int64_t delta;
4210 uint64_t fill = 0;
4211 int i;
4212
4213 ASSERT3P(db->db_blkptr, !=, NULL);
4214 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4215
4216 DB_DNODE_ENTER(db);
4217 dn = DB_DNODE(db);
4218 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4219 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4220 zio->io_prev_space_delta = delta;
4221
4222 if (bp->blk_birth != 0) {
4223 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4224 BP_GET_TYPE(bp) == dn->dn_type) ||
4225 (db->db_blkid == DMU_SPILL_BLKID &&
4226 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4227 BP_IS_EMBEDDED(bp));
4228 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4229 }
4230
4231 mutex_enter(&db->db_mtx);
4232
4233 #ifdef ZFS_DEBUG
4234 if (db->db_blkid == DMU_SPILL_BLKID) {
4235 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4236 ASSERT(!(BP_IS_HOLE(bp)) &&
4237 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4238 }
4239 #endif
4240
4241 if (db->db_level == 0) {
4242 mutex_enter(&dn->dn_mtx);
4243 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4244 db->db_blkid != DMU_SPILL_BLKID) {
4245 ASSERT0(db->db_objset->os_raw_receive);
4246 dn->dn_phys->dn_maxblkid = db->db_blkid;
4247 }
4248 mutex_exit(&dn->dn_mtx);
4249
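/*
 * For a block of dnodes, the fill count is the number of allocated
 * dnodes the block contains; e.g. a 16K dnode block has 32 512-byte
 * slots, and a dnode spanning dn_extra_slots additional slots still
 * contributes just one to fill.
 */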
4250 if (dn->dn_type == DMU_OT_DNODE) {
4251 i = 0;
4252 while (i < db->db.db_size) {
4253 dnode_phys_t *dnp =
4254 (void *)(((char *)db->db.db_data) + i);
4255
4256 i += DNODE_MIN_SIZE;
4257 if (dnp->dn_type != DMU_OT_NONE) {
4258 fill++;
4259 i += dnp->dn_extra_slots *
4260 DNODE_MIN_SIZE;
4261 }
4262 }
4263 } else {
4264 if (BP_IS_HOLE(bp)) {
4265 fill = 0;
4266 } else {
4267 fill = 1;
4268 }
4269 }
4270 } else {
4271 blkptr_t *ibp = db->db.db_data;
4272 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4273 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4274 if (BP_IS_HOLE(ibp))
4275 continue;
4276 fill += BP_GET_FILL(ibp);
4277 }
4278 }
4279 DB_DNODE_EXIT(db);
4280
4281 if (!BP_IS_EMBEDDED(bp))
4282 BP_SET_FILL(bp, fill);
4283
4284 mutex_exit(&db->db_mtx);
4285
4286 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4287 *db->db_blkptr = *bp;
4288 dmu_buf_unlock_parent(db, dblt, FTAG);
4289 }
4290
4291 /* ARGSUSED */
4292 /*
4293 * This function gets called just prior to running through the compression
4294 * stage of the zio pipeline. If we're an indirect block composed of only
4295 * holes, then we want this indirect to be compressed away to a hole. In
4296 * order to do that we must zero out any information about the holes that
4297 * this indirect points to before we try to compress it.
4298 */
4299 static void
4300 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4301 {
4302 dmu_buf_impl_t *db = vdb;
4303 dnode_t *dn;
4304 blkptr_t *bp;
4305 unsigned int epbs, i;
4306
4307 ASSERT3U(db->db_level, >, 0);
4308 DB_DNODE_ENTER(db);
4309 dn = DB_DNODE(db);
4310 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4311 ASSERT3U(epbs, <, 31);
4312
4313 /* Determine if all our children are holes */
4314 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4315 if (!BP_IS_HOLE(bp))
4316 break;
4317 }
4318
4319 /*
4320 * If all the children are holes, then zero them all out so that
4321 * this indirect block may be compressed away.
4322 */
4323 if (i == 1ULL << epbs) {
4324 /*
4325 * We only found holes. Grab the rwlock to prevent
4326 * anybody from reading the blocks we're about to
4327 * zero out.
4328 */
4329 rw_enter(&db->db_rwlock, RW_WRITER);
4330 bzero(db->db.db_data, db->db.db_size);
4331 rw_exit(&db->db_rwlock);
4332 }
4333 DB_DNODE_EXIT(db);
4334 }
4335
4336 /*
4337 * The SPA will call this callback several times for each zio - once
4338 * for every physical child i/o (zio->io_phys_children times). This
4339 * allows the DMU to monitor the progress of each logical i/o. For example,
4340 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4341 * block. There may be a long delay before all copies/fragments are completed,
4342 * so this callback allows us to retire dirty space gradually, as the physical
4343 * i/os complete.
4344 */
4345 /* ARGSUSED */
4346 static void
4347 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4348 {
4349 dmu_buf_impl_t *db = arg;
4350 objset_t *os = db->db_objset;
4351 dsl_pool_t *dp = dmu_objset_pool(os);
4352 dbuf_dirty_record_t *dr;
4353 int delta = 0;
4354
4355 dr = db->db_data_pending;
4356 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4357
4358 /*
4359 * The callback will be called io_phys_children times. Retire one
4360 * portion of our dirty space each time we are called. Any rounding
4361 * error will be cleaned up by dbuf_write_done().
4362 */
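/*
 * For example: with dr_accounted == 131072 and io_phys_children == 3,
 * each of the three calls retires 131072 / 3 == 43690 bytes here and
 * dbuf_write_done() retires the remaining 131072 % 3 == 2 bytes.
 */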
4363 delta = dr->dr_accounted / zio->io_phys_children;
4364 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4365 }
4366
4367 /* ARGSUSED */
4368 static void
4369 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4370 {
4371 dmu_buf_impl_t *db = vdb;
4372 blkptr_t *bp_orig = &zio->io_bp_orig;
4373 blkptr_t *bp = db->db_blkptr;
4374 objset_t *os = db->db_objset;
4375 dmu_tx_t *tx = os->os_synctx;
4376 dbuf_dirty_record_t **drp, *dr;
4377
4378 ASSERT0(zio->io_error);
4379 ASSERT(db->db_blkptr == bp);
4380
4381 /*
4382 * For nopwrites and rewrites we ensure that the bp matches our
4383 * original and bypass all the accounting.
4384 */
4385 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4386 ASSERT(BP_EQUAL(bp, bp_orig));
4387 } else {
4388 dsl_dataset_t *ds = os->os_dsl_dataset;
4389 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4390 dsl_dataset_block_born(ds, bp, tx);
4391 }
4392
4393 mutex_enter(&db->db_mtx);
4394
4395 DBUF_VERIFY(db);
4396
4397 drp = &db->db_last_dirty;
4398 while ((dr = *drp) != db->db_data_pending)
4399 drp = &dr->dr_next;
4400 ASSERT(!list_link_active(&dr->dr_dirty_node));
4401 ASSERT(dr->dr_dbuf == db);
4402 ASSERT(dr->dr_next == NULL);
4403 *drp = dr->dr_next;
4404
4405 #ifdef ZFS_DEBUG
4406 if (db->db_blkid == DMU_SPILL_BLKID) {
4407 dnode_t *dn;
4408
4409 DB_DNODE_ENTER(db);
4410 dn = DB_DNODE(db);
4411 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4412 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4413 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4414 DB_DNODE_EXIT(db);
4415 }
4416 #endif
4417
4418 if (db->db_level == 0) {
4419 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4420 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4421 if (db->db_state != DB_NOFILL) {
4422 if (dr->dt.dl.dr_data != db->db_buf)
4423 arc_buf_destroy(dr->dt.dl.dr_data, db);
4424 }
4425 } else {
4426 dnode_t *dn;
4427
4428 DB_DNODE_ENTER(db);
4429 dn = DB_DNODE(db);
4430 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4431 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4432 if (!BP_IS_HOLE(db->db_blkptr)) {
4433 ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
4434 SPA_BLKPTRSHIFT);
4435 ASSERT3U(db->db_blkid, <=,
4436 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4437 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4438 db->db.db_size);
4439 }
4440 DB_DNODE_EXIT(db);
4441 mutex_destroy(&dr->dt.di.dr_mtx);
4442 list_destroy(&dr->dt.di.dr_children);
4443 }
4444
4445 cv_broadcast(&db->db_changed);
4446 ASSERT(db->db_dirtycnt > 0);
4447 db->db_dirtycnt -= 1;
4448 db->db_data_pending = NULL;
4449 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4450
4451 /*
4452 * If we didn't do a physical write in this ZIO and we
4453 * still ended up here, it means that the space of the
4454 * dbuf that we just released (and undirtied) above hasn't
4455 * been marked as undirtied in the pool's accounting.
4456 *
4457 * Thus, we undirty that space in the pool's view of the
4458 * world here. For physical writes this type of update
4459 * happens in dbuf_write_physdone().
4460 *
4461 * If we did a physical write, cleanup any rounding errors
4462 * that came up due to writing multiple copies of a block
4463 * on disk [see dbuf_write_physdone()].
4464 */
4465 if (zio->io_phys_children == 0) {
4466 dsl_pool_undirty_space(dmu_objset_pool(os),
4467 dr->dr_accounted, zio->io_txg);
4468 } else {
4469 dsl_pool_undirty_space(dmu_objset_pool(os),
4470 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4471 }
4472
4473 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4474 }
4475
4476 static void
4477 dbuf_write_nofill_ready(zio_t *zio)
4478 {
4479 dbuf_write_ready(zio, NULL, zio->io_private);
4480 }
4481
4482 static void
4483 dbuf_write_nofill_done(zio_t *zio)
4484 {
4485 dbuf_write_done(zio, NULL, zio->io_private);
4486 }
4487
4488 static void
4489 dbuf_write_override_ready(zio_t *zio)
4490 {
4491 dbuf_dirty_record_t *dr = zio->io_private;
4492 dmu_buf_impl_t *db = dr->dr_dbuf;
4493
4494 dbuf_write_ready(zio, NULL, db);
4495 }
4496
4497 static void
4498 dbuf_write_override_done(zio_t *zio)
4499 {
4500 dbuf_dirty_record_t *dr = zio->io_private;
4501 dmu_buf_impl_t *db = dr->dr_dbuf;
4502 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4503
4504 mutex_enter(&db->db_mtx);
4505 if (!BP_EQUAL(zio->io_bp, obp)) {
4506 if (!BP_IS_HOLE(obp))
4507 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4508 arc_release(dr->dt.dl.dr_data, db);
4509 }
4510 mutex_exit(&db->db_mtx);
4511
4512 dbuf_write_done(zio, NULL, db);
4513
4514 if (zio->io_abd != NULL)
4515 abd_put(zio->io_abd);
4516 }
4517
4518 typedef struct dbuf_remap_impl_callback_arg {
4519 objset_t *drica_os;
4520 uint64_t drica_blk_birth;
4521 dmu_tx_t *drica_tx;
4522 } dbuf_remap_impl_callback_arg_t;
4523
4524 static void
4525 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4526 void *arg)
4527 {
4528 dbuf_remap_impl_callback_arg_t *drica = arg;
4529 objset_t *os = drica->drica_os;
4530 spa_t *spa = dmu_objset_spa(os);
4531 dmu_tx_t *tx = drica->drica_tx;
4532
4533 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4534
4535 if (os == spa_meta_objset(spa)) {
4536 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4537 } else {
4538 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4539 size, drica->drica_blk_birth, tx);
4540 }
4541 }
4542
4543 static void
4544 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4545 {
4546 blkptr_t bp_copy = *bp;
4547 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4548 dbuf_remap_impl_callback_arg_t drica;
4549
4550 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4551
4552 drica.drica_os = dn->dn_objset;
4553 drica.drica_blk_birth = bp->blk_birth;
4554 drica.drica_tx = tx;
4555 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4556 &drica)) {
4557 /*
4558 * If the blkptr being remapped is tracked by a livelist,
4559 * then we need to make sure the livelist reflects the update.
4560 * First, cancel out the old blkptr by appending a 'FREE'
4561 * entry. Next, add an 'ALLOC' to track the new version. This
4562 * way we avoid trying to free an inaccurate blkptr at delete.
4563 * Note that embedded blkptrs are not tracked in livelists.
4564 */
4565 if (dn->dn_objset != spa_meta_objset(spa)) {
4566 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4567 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4568 bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4569 ASSERT(!BP_IS_EMBEDDED(bp));
4570 ASSERT(dsl_dir_is_clone(ds->ds_dir));
4571 ASSERT(spa_feature_is_enabled(spa,
4572 SPA_FEATURE_LIVELIST));
4573 bplist_append(&ds->ds_dir->dd_pending_frees,
4574 bp);
4575 bplist_append(&ds->ds_dir->dd_pending_allocs,
4576 &bp_copy);
4577 }
4578 }
4579
4580 /*
4581 * The db_rwlock prevents dbuf_read_impl() from
4582 * dereferencing the BP while we are changing it. To
4583 * avoid lock contention, only grab it when we are actually
4584 * changing the BP.
4585 */
4586 if (rw != NULL)
4587 rw_enter(rw, RW_WRITER);
4588 *bp = bp_copy;
4589 if (rw != NULL)
4590 rw_exit(rw);
4591 }
4592 }
4593
4594 /*
4595 * Remap any existing BPs to concrete vdevs, if possible.
4596 */
4597 static void
4598 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4599 {
4600 spa_t *spa = dmu_objset_spa(db->db_objset);
4601 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4602
4603 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4604 return;
4605
4606 if (db->db_level > 0) {
4607 blkptr_t *bp = db->db.db_data;
4608 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4609 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4610 }
4611 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4612 dnode_phys_t *dnp = db->db.db_data;
4613 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4614 DMU_OT_DNODE);
4615 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4616 i += dnp[i].dn_extra_slots + 1) {
4617 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4618 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4619 &dn->dn_dbuf->db_rwlock);
4620 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4621 tx);
4622 }
4623 }
4624 }
4625 }
4626
4627
4628 /* Issue I/O to commit a dirty buffer to disk. */
4629 static void
4630 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4631 {
4632 dmu_buf_impl_t *db = dr->dr_dbuf;
4633 dnode_t *dn;
4634 objset_t *os;
4635 dmu_buf_impl_t *parent = db->db_parent;
4636 uint64_t txg = tx->tx_txg;
4637 zbookmark_phys_t zb;
4638 zio_prop_t zp;
4639 zio_t *zio;
4640 int wp_flag = 0;
4641
4642 ASSERT(dmu_tx_is_syncing(tx));
4643
4644 DB_DNODE_ENTER(db);
4645 dn = DB_DNODE(db);
4646 os = dn->dn_objset;
4647
4648 if (db->db_state != DB_NOFILL) {
4649 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4650 /*
4651 * Private object buffers are released here rather
4652 * than in dbuf_dirty() since they are only modified
4653 * in the syncing context and we don't want the
4654 * overhead of making multiple copies of the data.
4655 */
4656 if (BP_IS_HOLE(db->db_blkptr)) {
4657 arc_buf_thaw(data);
4658 } else {
4659 dbuf_release_bp(db);
4660 }
4661 dbuf_remap(dn, db, tx);
4662 }
4663 }
4664
4665 if (parent != dn->dn_dbuf) {
4666 /* Our parent is an indirect block. */
4667 /* We have a dirty parent that has been scheduled for write. */
4668 ASSERT(parent && parent->db_data_pending);
4669 /* Our parent's buffer is one level closer to the dnode. */
4670 ASSERT(db->db_level == parent->db_level-1);
4671 /*
4672 * We're about to modify our parent's db_data by modifying
4673 * our block pointer, so the parent must be released.
4674 */
4675 ASSERT(arc_released(parent->db_buf));
4676 zio = parent->db_data_pending->dr_zio;
4677 } else {
4678 /* Our parent is the dnode itself. */
4679 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
4680 db->db_blkid != DMU_SPILL_BLKID) ||
4681 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
4682 if (db->db_blkid != DMU_SPILL_BLKID)
4683 ASSERT3P(db->db_blkptr, ==,
4684 &dn->dn_phys->dn_blkptr[db->db_blkid]);
4685 zio = dn->dn_zio;
4686 }
4687
4688 ASSERT(db->db_level == 0 || data == db->db_buf);
4689 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
4690 ASSERT(zio);
4691
4692 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
4693 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
4694 db->db.db_object, db->db_level, db->db_blkid);
4695
4696 if (db->db_blkid == DMU_SPILL_BLKID)
4697 wp_flag = WP_SPILL;
4698 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
4699
4700 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
4701 DB_DNODE_EXIT(db);
4702
4703 /*
4704 * We copy the blkptr now (rather than when we instantiate the dirty
4705 * record), because its value can change between open context and
4706 * syncing context. We do not need to hold dn_struct_rwlock to read
4707 * db_blkptr because we are in syncing context.
4708 */
4709 dr->dr_bp_copy = *db->db_blkptr;
4710
4711 if (db->db_level == 0 &&
4712 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
4713 /*
4714 * The BP for this block has been provided by open context
4715 * (by dmu_sync() or dmu_buf_write_embedded()).
4716 */
4717 abd_t *contents = (data != NULL) ?
4718 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
4719
4720 dr->dr_zio = zio_write(zio, os->os_spa, txg,
4721 &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size,
4722 &zp, dbuf_write_override_ready, NULL, NULL,
4723 dbuf_write_override_done,
4724 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
4725 mutex_enter(&db->db_mtx);
4726 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
4727 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
4728 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
4729 mutex_exit(&db->db_mtx);
4730 } else if (db->db_state == DB_NOFILL) {
4731 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
4732 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
4733 dr->dr_zio = zio_write(zio, os->os_spa, txg,
4734 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
4735 dbuf_write_nofill_ready, NULL, NULL,
4736 dbuf_write_nofill_done, db,
4737 ZIO_PRIORITY_ASYNC_WRITE,
4738 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
4739 } else {
4740 ASSERT(arc_released(data));
4741
4742 /*
4743 * For indirect blocks, we want to set up the children
4744 * ready callback so that we can properly handle an indirect
4745 * block that only contains holes.
4746 */
4747 arc_write_done_func_t *children_ready_cb = NULL;
4748 if (db->db_level != 0)
4749 children_ready_cb = dbuf_write_children_ready;
4750
4751 dr->dr_zio = arc_write(zio, os->os_spa, txg,
4752 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
4753 &zp, dbuf_write_ready,
4754 children_ready_cb, dbuf_write_physdone,
4755 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
4756 ZIO_FLAG_MUSTSUCCEED, &zb);
4757 }
4758 }
4759
4760 EXPORT_SYMBOL(dbuf_find);
4761 EXPORT_SYMBOL(dbuf_is_metadata);
4762 EXPORT_SYMBOL(dbuf_destroy);
4763 EXPORT_SYMBOL(dbuf_loan_arcbuf);
4764 EXPORT_SYMBOL(dbuf_whichblock);
4765 EXPORT_SYMBOL(dbuf_read);
4766 EXPORT_SYMBOL(dbuf_unoverride);
4767 EXPORT_SYMBOL(dbuf_free_range);
4768 EXPORT_SYMBOL(dbuf_new_size);
4769 EXPORT_SYMBOL(dbuf_release_bp);
4770 EXPORT_SYMBOL(dbuf_dirty);
4771 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
4772 EXPORT_SYMBOL(dmu_buf_will_dirty);
4773 EXPORT_SYMBOL(dmu_buf_is_dirty);
4774 EXPORT_SYMBOL(dmu_buf_will_not_fill);
4775 EXPORT_SYMBOL(dmu_buf_will_fill);
4776 EXPORT_SYMBOL(dmu_buf_fill_done);
4777 EXPORT_SYMBOL(dmu_buf_rele);
4778 EXPORT_SYMBOL(dbuf_assign_arcbuf);
4779 EXPORT_SYMBOL(dbuf_prefetch);
4780 EXPORT_SYMBOL(dbuf_hold_impl);
4781 EXPORT_SYMBOL(dbuf_hold);
4782 EXPORT_SYMBOL(dbuf_hold_level);
4783 EXPORT_SYMBOL(dbuf_create_bonus);
4784 EXPORT_SYMBOL(dbuf_spill_set_blksz);
4785 EXPORT_SYMBOL(dbuf_rm_spill);
4786 EXPORT_SYMBOL(dbuf_add_ref);
4787 EXPORT_SYMBOL(dbuf_rele);
4788 EXPORT_SYMBOL(dbuf_rele_and_unlock);
4789 EXPORT_SYMBOL(dbuf_refcount);
4790 EXPORT_SYMBOL(dbuf_sync_list);
4791 EXPORT_SYMBOL(dmu_buf_set_user);
4792 EXPORT_SYMBOL(dmu_buf_set_user_ie);
4793 EXPORT_SYMBOL(dmu_buf_get_user);
4794 EXPORT_SYMBOL(dmu_buf_get_blkptr);
4795
4796 /* BEGIN CSTYLED */
4797 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
4798 "Maximum size in bytes of the dbuf cache.");
4799
4800 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
4801 "Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
4802 "directly.");
4803
4804 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
4805 "Percentage below dbuf_cache_max_bytes when the evict thread stops "
4806 "evicting dbufs.");
4807
4808 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
4809 "Maximum size in bytes of the dbuf metadata cache.");
4810
4811 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
4812 "Set the size of the dbuf cache to a log2 fraction of arc size.");
4813
4814 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
4815 "Set the size of the dbuf metadata cache to a log2 fraction of arc "
4816 "size.");
4817 /* END CSTYLED */
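
/*
 * On Linux these tunables are exposed as zfs module parameters (e.g. under
 * /sys/module/zfs/parameters/).  As an illustration only, the dbuf cache
 * ceiling could be raised at runtime with something like:
 *
 *	echo 268435456 > /sys/module/zfs/parameters/dbuf_cache_max_bytes
 */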