/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
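
/*
 * A dbuf_hold_impl_data structure holds the arguments and local state
 * of one level of __dbuf_hold_impl(), so the traversal can allocate
 * its "stack frames" from the heap (dh_depth tracks the recursion
 * depth) instead of recursing deeply on the kernel stack.
 */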
struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	int dh_fail_sparse;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

static void dbuf_destroy(dmu_buf_impl_t *db);
static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	list_link_init(&db->db_link);
	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;
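
/*
 * Hash a dbuf's identity (objset, object, level, blkid) to a 64-bit
 * value by folding each component through zfs_crc64_table, then mixing
 * in the high-order bits that the CRC step does not reach.
 */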
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))
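
/*
 * Look up a dbuf by (objset, object, level, blkid) in the global hash
 * table.  On success the dbuf is returned with db_mtx held; dbufs that
 * are being evicted are skipped.  Returns NULL if no match is found.
 */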
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	obj = dn->dn_object;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;
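
/*
 * Notify the dbuf's registered user, if any, that the dbuf is being
 * evicted, and clear the user callback state.  Only level-0 dbufs
 * carry user data.
 */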
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}
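
/*
 * Remove an unreferenced dbuf from the system: detach it from its
 * dnode and hash bucket (dbuf_clear) and free it (dbuf_destroy).
 * Called with db_mtx held; the dbuf must have no associated ARC
 * buffer or pending I/O.
 */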
void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel.
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel.
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */
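
/*
 * Sanity-check a dbuf against its dnode, parent, and block pointer.
 * Compiled only under ZFS_DEBUG and gated at run time by the
 * ZFS_DEBUG_DBUF_VERIFY flag.
 */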
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, 0);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			ASSERTV(uint64_t *buf = db->db.db_data);
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}
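
/*
 * Point the dbuf at a new ARC buffer, registering our eviction
 * callback on it, or detach it from its current buffer, in which case
 * any registered user is evicted and the dbuf transitions to
 * DB_UNCACHED (unless it is a DB_NOFILL dbuf).
 */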
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa;

		mutex_exit(&db->db_mtx);
		DB_GET_SPA(&spa, db);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}
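
/*
 * Translate a byte offset within a dnode into the corresponding
 * level-0 block number.  Objects with a single block (no
 * datablkshift) always map to block 0.
 */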
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
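
/*
 * ARC completion callback for dbuf reads: attach the buffer to the
 * dbuf on success (or zero it if the block was freed in flight), mark
 * the dbuf DB_CACHED or DB_UNCACHED, and wake any waiters.
 */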
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}
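
/*
 * Issue the actual read for a dbuf: bonus buffers are copied directly
 * out of the dnode, holes are materialized as zero-filled buffers, and
 * everything else is read through the ARC.  Called with db_mtx held;
 * the mutex is dropped before the read is issued.
 */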
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	spa_t *spa;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;
	arc_buf_t *pbuf;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
		    db->db.db_size, db, type));
		DB_DNODE_EXIT(db);
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	spa = dn->dn_objset->os_spa;
	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);
	/* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */

	if (db->db_parent)
		pbuf = db->db_parent->db_buf;
	else
		pbuf = db->db_objset->os_phys_buf;

	(void) dsl_read(zio, spa, db->db_blkptr, pbuf,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}
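
/*
 * Read a dbuf's contents, driving it toward DB_CACHED.  Already-cached
 * dbufs return immediately (optionally feeding the prefetcher); an
 * uncached dbuf is read via dbuf_read_impl().  When the caller supplies
 * a parent zio the read is merely queued and the caller must wait;
 * otherwise we wait here and return the read's error.
 */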
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	int havepzio = (zio != NULL);
	int prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (EIO);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = EIO;
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
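
/*
 * Prepare a dbuf for being completely overwritten: if it is uncached,
 * allocate a fresh buffer and enter DB_FILL rather than reading the
 * old contents from disk.  Bonus buffers are not supported here.
 */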
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		DB_GET_SPA(&spa, db);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put
 * a hold on the buffer, we just traverse the active dbuf list for
 * the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}
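
/*
 * Undo a write override (as set up by dmu_sync()): free the block the
 * override pointed at and return the dirty record to the normal
 * DR_NOT_OVERRIDDEN state so the data is written by the regular sync
 * path.
 */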
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp)) {
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		zio_free(spa, txg, bp);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t first_l1 = start >> epbs;
	uint64_t last_l1 = end >> epbs;

	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
		end = dn->dn_maxblkid;
		last_l1 = end >> epbs;
	}
	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level == 1 &&
		    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
			mutex_enter(&db->db_mtx);
			if (db->db_last_dirty &&
			    db->db_last_dirty->dr_txg < txg) {
				dbuf_add_ref(db, FTAG);
				mutex_exit(&db->db_mtx);
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			} else {
				mutex_exit(&db->db_mtx);
			}
		}

		if (db->db_level != 0)
			continue;
		dprintf_dbuf(db, "found buf %s\n", "");
		if (db->db_blkid < start || db->db_blkid > end)
			continue;

		/* found a level 0 buffer in the range */
		if (dbuf_undirty(db, tx))
			continue;

		mutex_enter(&db->db_mtx);
		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}
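
/*
 * Determine whether freeing this dbuf's block would actually reclaim
 * space, i.e. the block (by dirty txg or on-disk birth txg) exists and
 * is not pinned by a snapshot.
 */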
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty)
		birth_txg = db->db_last_dirty->dr_txg;
	else if (db->db_blkptr)
		birth_txg = db->db_blkptr->blk_birth;

	/*
	 * If we don't exist or are in a snapshot, we can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (FALSE);
}
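
/*
 * Change the dbuf's block size: allocate a new ARC buffer of the new
 * size, copy (and zero-extend) the old contents, and point the current
 * dirty record at the new buffer.
 */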
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dbuf_will_dirty(db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db) == 1);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os;
	zbookmark_t zb;

	DB_GET_OBJSET(&os, db);
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	zb.zb_objset = os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;
	(void) arc_release_bp(db->db_buf, db,
	    db->db_blkptr, os->os_spa, &zb);
}
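
/*
 * Mark a dbuf dirty in the given transaction, creating (or reusing) a
 * dirty record, making a just-in-time copy of data still referenced by
 * an older txg, and recursively dirtying the parent indirect block (or
 * linking the record into the dnode's dirty list at the top level).
 */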
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
	list_link_init(&dr->dr_dirty_node);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/* possible race with dbuf_undirty() */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}
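
/*
 * Undo a dbuf_dirty(): remove the dirty record for this txg, release
 * the hold it took, and evict the dbuf if that was the last hold.
 * Returns nonzero if the dbuf was evicted.
 */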
static int
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	mutex_enter(&db->db_mtx);
	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg) {
		mutex_exit(&db->db_mtx);
		return (0);
	}
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/*
	 * If this buffer is currently held, we cannot undirty
	 * it, since one of the current holders may be in the
	 * middle of an update.  Note that users of dbuf_undirty()
	 * should not place a hold on the dbuf before the call.
	 * Also note: we can get here with a spill block, so
	 * test for that similar to how dbuf_dirty does.
	 */
	if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		/* Make sure we don't toss this buffer at sync phase */
		if (db->db_blkid != DMU_SPILL_BLKID) {
			mutex_enter(&dn->dn_mtx);
			dnode_clear_range(dn, db->db_blkid, 1, tx);
			mutex_exit(&dn->dn_mtx);
		}
		DB_DNODE_EXIT(db);
		return (0);
	}

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/* XXX would be nice to fix up dn_towrite_space[] */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_level == 0) {
		if (db->db_state != DB_NOFILL) {
			dbuf_unoverride(dr);

			ASSERT(db->db_buf != NULL);
			ASSERT(dr->dt.dl.dr_data != NULL);
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
		}
	} else {
		ASSERT(db->db_buf != NULL);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		dbuf_evict(db);
		return (1);
	}

	mutex_exit(&db->db_mtx);
	return (0);
}

#pragma weak dmu_buf_will_dirty = dbuf_will_dirty
void
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dbuf_fill_done(db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 * DMU: dbuf_clear()->arc_buf_evict()
 * ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	int dbuf_gone = FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}
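
/*
 * Locate the parent dbuf and block pointer that reference the given
 * level/blkid.  Spill blocks hang directly off the dnode; otherwise the
 * slot is found either in the parent indirect block (held recursively,
 * or iteratively via the dbuf_hold_impl_data frame stack when one is
 * supplied) or in the dnode's own blkptr array.
 */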
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (ENOENT);
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err;
		if (dh == NULL) {
			err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
			    fail_sparse, NULL, parentp);
		} else {
			__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
			    blkid >> epbs, fail_sparse, NULL,
			    parentp, dh->dh_depth + 1);
			err = __dbuf_hold_impl(dh + 1);
		}
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
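
/*
 * Allocate and initialize a new dbuf for the given block of a dnode,
 * insert it into the hash table and the dnode's dbuf list, and take a
 * hold on the dnode.  If another thread won the race to insert an
 * equal dbuf, the existing dbuf is returned instead.
 */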

static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}
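
/*
 * Worked example (editor's note, not in the original source): for a
 * level-0 dbuf of an object with a 128K data block size, blkid 5 yields
 * db.db_size = 131072 and db.db_offset = 5 * 131072 = 655360.  For an
 * indirect (level > 0) dbuf the size is 1 << dn_indblkshift instead,
 * independent of the object's data block size.
 */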

static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			list_remove(&dn->dn_dbufs, db);
			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(!list_link_active(&db->db_link));
	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}

void
dbuf_prefetch(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db = NULL;
	blkptr_t *bp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (dnode_block_freed(dn, blkid))
		return;

	/* dbuf_find() returns with db_mtx held */
	if ((db = dbuf_find(dn, 0, blkid))) {
		/*
		 * This dbuf is already in the cache.  We assume that
		 * it is already CACHED, or else about to be either
		 * read or filled.
		 */
		mutex_exit(&db->db_mtx);
		return;
	}

	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
		if (bp && !BP_IS_HOLE(bp)) {
			int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
			    ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
			arc_buf_t *pbuf;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
			    dn->dn_object, 0, blkid);

			if (db)
				pbuf = db->db_buf;
			else
				pbuf = dn->dn_objset->os_phys_buf;

			(void) dsl_read(NULL, dn->dn_objset->os_spa,
			    bp, pbuf, NULL, NULL, priority,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		if (db)
			dbuf_rele(db, NULL);
	}
}
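
/*
 * Usage sketch (editor's illustration, assuming a caller that already
 * has a hold on the dnode): prefetch is fire-and-forget, so the caller
 * only needs dn_struct_rwlock held across the call.
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dbuf_prefetch(dn, blkid);
 *	rw_exit(&dn->dn_struct_rwlock);
 */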

#define	DBUF_HOLD_IMPL_MAX_DEPTH	20

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
static int
__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
{
	ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
	dh->dh_parent = NULL;

	ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
	ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);

	*(dh->dh_dbp) = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);

	if (dh->dh_db == NULL) {
		dh->dh_bp = NULL;

		ASSERT3P(dh->dh_parent, ==, NULL);
		dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_fail_sparse, &dh->dh_parent,
		    &dh->dh_bp, dh);
		if (dh->dh_fail_sparse) {
			if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
				dh->dh_err = ENOENT;
			if (dh->dh_err) {
				if (dh->dh_parent)
					dbuf_rele(dh->dh_parent, NULL);
				return (dh->dh_err);
			}
		}
		if (dh->dh_err && dh->dh_err != ENOENT)
			return (dh->dh_err);
		dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_parent, dh->dh_bp);
	}

	if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
		arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
		if (dh->dh_db->db_buf->b_data == NULL) {
			dbuf_clear(dh->dh_db);
			if (dh->dh_parent) {
				dbuf_rele(dh->dh_parent, NULL);
				dh->dh_parent = NULL;
			}
			goto top;
		}
		ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
	}

	ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (dh->dh_db->db_level == 0 &&
	    dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
	    dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
	    dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
		dh->dh_dr = dh->dh_db->db_data_pending;

		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
			dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);

			dbuf_set_data(dh->dh_db,
			    arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
			    dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
			bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
			    dh->dh_db->db.db_data, dh->dh_db->db.db_size);
		}
	}

	(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
	dbuf_update_data(dh->dh_db);
	DBUF_VERIFY(dh->dh_db);
	mutex_exit(&dh->dh_db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (dh->dh_parent)
		dbuf_rele(dh->dh_parent, NULL);

	ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
	ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
	ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
	*(dh->dh_dbp) = dh->dh_db;

	return (0);
}

/*
 * The following code preserves the recursive function dbuf_hold_impl()
 * but moves the local variables AND function arguments to the heap to
 * minimize the stack frame size.  Enough space is initially allocated
 * on the heap for 20 levels of recursion.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	struct dbuf_hold_impl_data *dh;
	int error;

	dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);

	__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

	error = __dbuf_hold_impl(dh);

	kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH);

	return (error);
}
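
/*
 * Editor's note on the sizing (illustrative, not from the source): each
 * level of the block tree consumes one dbuf_hold_impl_data slot, since
 * dbuf_findbp() recurses into __dbuf_hold_impl(dh + 1) once per
 * indirection level, so the 20-slot array comfortably covers the deepest
 * possible tree while keeping this function's stack frame to little more
 * than a pointer and an int.
 */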

static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth)
{
	dh->dh_dn = dn;
	dh->dh_level = level;
	dh->dh_blkid = blkid;
	dh->dh_fail_sparse = fail_sparse;
	dh->dh_tag = tag;
	dh->dh_dbp = dbp;
	dh->dh_depth = depth;
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}
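
/*
 * Usage sketch (editor's illustration, assuming the caller already holds
 * dn_struct_rwlock and a hold on the dnode):
 *
 *	db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		... use db->db.db_data after a dbuf_read() ...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * The tag passed to dbuf_hold() must match the tag later passed to
 * dbuf_rele(); FTAG is the usual choice inside a single function.
 */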

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (ENOTSUP);
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}
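
/*
 * Worked example (editor's note): with SPA_MINBLOCKSIZE of 512, a
 * requested blksz of 3000 is rounded up by P2ROUNDUP() to 3072; a
 * request of 0 becomes 512; anything above SPA_MAXBLOCKSIZE is clamped
 * down to that maximum.
 */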

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	VERIFY(refcount_add(&db->db_holds, tag) > 1);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
 */
#pragma weak dmu_buf_rele = dbuf_rele
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}

/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this barrier
			 * until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer discounted
			 * in dnode_move().  The dnode cannot move until after
			 * the dnode_rele().
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db) == 1);
			dbuf_evict(db);
		} else {
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
			if (!DBUF_IS_CACHEABLE(db))
				dbuf_clear(db);
			else
				mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}
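
/*
 * Editor's note (illustrative): each dirty record takes a reference on
 * the dbuf, so a level-0 dbuf dirtied in two open txgs has
 * db_dirtycnt == 2 and at least two holds.  When holds drops to exactly
 * db_dirtycnt, the only remaining references belong to the dirty
 * records, which is why that is the trigger for evicting user data
 * above.
 */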

#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}
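
/*
 * Usage sketch (editor's illustration; my_state_t and my_evict() are
 * hypothetical names, not part of this file).  A consumer attaches
 * private state to a held dbuf and is called back when it is evicted:
 *
 *	static void
 *	my_evict(dmu_buf_t *db, void *arg)
 *	{
 *		my_state_t *state = arg;
 *		kmem_free(state, sizeof (my_state_t));
 *	}
 *
 *	winner = dmu_buf_set_user(db, state, NULL, my_evict);
 *	if (winner != NULL) {
 *		// someone else attached state first; use theirs
 *		kmem_free(state, sizeof (my_state_t));
 *		state = winner;
 *	}
 */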

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

/*
 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
 * is critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

/*
 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
 * critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}
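
/*
 * Editor's note on blk_fill (illustrative): a plain level-0 data block
 * counts as fill of 1, a dnode block counts each allocated dnode it
 * contains, and an indirect block sums the blk_fill of its non-hole
 * children.  So a 128K indirect block whose 1024 slots reference 300
 * written data blocks ends up with blk_fill == 300.
 */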

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT3U(zio->io_error, ==, 0);
	ASSERT(db->db_blkptr == bp);

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT);
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, dbuf_write_override_done, dr,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
		    dbuf_write_ready, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
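
/*
 * Editor's summary (descriptive, not from the source): dbuf_write()
 * dispatches one of three write paths.  An overridden level-0 buffer
 * (data already written by dmu_sync()) issues a zio_write() whose block
 * pointer is then forced with zio_write_override(); a DB_NOFILL buffer
 * issues a data-less zio_write() with ZIO_FLAG_NODATA; everything else
 * goes through arc_write() so the ARC buffer itself is written.
 */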

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_evict);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_clear);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_update_user);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_freeable);
#endif