/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>

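/*
 * dbuf_hold_impl() keeps its per-level state in heap-allocated
 * dbuf_hold_impl_data frames (rather than on the stack) so that
 * descending a deep indirect block tree stays within the limited
 * kernel stack available under Linux.
 */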
struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	int dh_fail_sparse;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	list_link_init(&db->db_link);
	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

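/*
 * The dbuf identity -- objset pointer, object number, level, and block
 * id -- is folded into a CRC64 (via zfs_crc64_table) so that dbufs
 * distribute evenly across the hash buckets.
 */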
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

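/*
 * Look up a dbuf in the hash table.  On a hit the dbuf is returned
 * with its db_mtx held; dbufs in the DB_EVICTING state are skipped as
 * though they were absent.
 */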
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	obj = dn->dn_object;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx, to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;

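/*
 * Invoke the user-supplied eviction callback, if one is registered,
 * for a level-0 dbuf, then clear the user state so the callback can
 * fire at most once.
 */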
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem * sizeof (void *) / 4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;
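	/*
	 * For example, with 4 GiB of physical memory this loop stops at
	 * hsize = 2^20 buckets (2^20 * 4K == 4 GiB), giving an 8 MiB
	 * table with 8-byte pointers.
	 */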

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			ASSERTV(uint64_t *buf = db->db.db_data);
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa;

		mutex_exit(&db->db_mtx);
		DB_GET_SPA(&spa, db);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

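/*
 * Map a byte offset within a dnode to a block id.  When the dnode does
 * not have a power-of-two block size (dn_datablkshift == 0), the object
 * can only consist of a single block, so block 0 is returned.
 */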
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	spa_t *spa;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
		    db->db.db_size, db, type));
		DB_DNODE_EXIT(db);
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	spa = dn->dn_objset->os_spa;
	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}

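/*
 * Read the contents of a dbuf.  If the dbuf is already DB_CACHED this
 * returns (almost) immediately; otherwise the read is issued through
 * the caller-supplied parent zio, or through a private root zio that
 * is waited on before returning.
 */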
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	int havepzio = (zio != NULL);
	int prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

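/*
 * Prepare a dbuf to be completely overwritten without reading its old
 * contents: wait out any read or fill in progress, then, if the dbuf
 * is uncached, attach a fresh arc buffer and move it to DB_FILL.
 */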
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		DB_GET_SPA(&spa, db);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put a
 * hold on the buffer; we just traverse the active dbuf list for the
 * dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

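/*
 * Undo a dmu_sync()-style override: free the block that was written
 * early (unless it was a hole or a nopwrite) and return the dirty
 * record to the ordinary DR_NOT_OVERRIDDEN write path.
 */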
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) {
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		zio_free(spa, txg, bp);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t first_l1 = start >> epbs;
	uint64_t last_l1 = end >> epbs;
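
	/*
	 * epbs is the log2 of the number of block pointers per indirect
	 * block, so a level-1 dbuf covers 2^epbs level-0 blocks; shifting
	 * the range by epbs yields the span of level-1 dbufs that may
	 * need to be dirtied below.
	 */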
34dc7c2f 875
428870ff 876 if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
b128c09f
BB
877 end = dn->dn_maxblkid;
878 last_l1 = end >> epbs;
879 }
880 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
ea97f8ce 881
b663a23d
MA
882 mutex_enter(&dn->dn_dbufs_mtx);
883 if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
884 /* There can't be any dbufs in this range; no need to search. */
885 mutex_exit(&dn->dn_dbufs_mtx);
886 return;
887 } else if (dmu_objset_is_receiving(dn->dn_objset)) {
ea97f8ce 888 /*
b663a23d
MA
889 * If we are receiving, we expect there to be no dbufs in
890 * the range to be freed, because receive modifies each
891 * block at most once, and in offset order. If this is
892 * not the case, it can lead to performance problems,
893 * so note that we unexpectedly took the slow path.
ea97f8ce 894 */
b663a23d 895 atomic_inc_64(&zfs_free_range_recv_miss);
ea97f8ce
MA
896 }
897
e8b96c60 898 for (db = list_head(&dn->dn_dbufs); db != NULL; db = db_next) {
34dc7c2f 899 db_next = list_next(&dn->dn_dbufs, db);
428870ff 900 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
b128c09f
BB
901
902 if (db->db_level == 1 &&
903 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
904 mutex_enter(&db->db_mtx);
905 if (db->db_last_dirty &&
906 db->db_last_dirty->dr_txg < txg) {
907 dbuf_add_ref(db, FTAG);
908 mutex_exit(&db->db_mtx);
909 dbuf_will_dirty(db, tx);
910 dbuf_rele(db, FTAG);
911 } else {
912 mutex_exit(&db->db_mtx);
913 }
914 }
915
34dc7c2f
BB
916 if (db->db_level != 0)
917 continue;
918 dprintf_dbuf(db, "found buf %s\n", "");
b128c09f 919 if (db->db_blkid < start || db->db_blkid > end)
34dc7c2f
BB
920 continue;
921
922 /* found a level 0 buffer in the range */
13fe0198
MA
923 mutex_enter(&db->db_mtx);
924 if (dbuf_undirty(db, tx)) {
925 /* mutex has been dropped and dbuf destroyed */
34dc7c2f 926 continue;
13fe0198 927 }
34dc7c2f 928
34dc7c2f 929 if (db->db_state == DB_UNCACHED ||
b128c09f 930 db->db_state == DB_NOFILL ||
34dc7c2f
BB
931 db->db_state == DB_EVICTING) {
932 ASSERT(db->db.db_data == NULL);
933 mutex_exit(&db->db_mtx);
934 continue;
935 }
936 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
937 /* will be handled in dbuf_read_done or dbuf_rele */
938 db->db_freed_in_flight = TRUE;
939 mutex_exit(&db->db_mtx);
940 continue;
941 }
942 if (refcount_count(&db->db_holds) == 0) {
943 ASSERT(db->db_buf);
944 dbuf_clear(db);
945 continue;
946 }
947 /* The dbuf is referenced */
948
949 if (db->db_last_dirty != NULL) {
950 dbuf_dirty_record_t *dr = db->db_last_dirty;
951
952 if (dr->dr_txg == txg) {
953 /*
954 * This buffer is "in-use", re-adjust the file
955 * size to reflect that this buffer may
956 * contain new data when we sync.
957 */
428870ff
BB
958 if (db->db_blkid != DMU_SPILL_BLKID &&
959 db->db_blkid > dn->dn_maxblkid)
34dc7c2f
BB
960 dn->dn_maxblkid = db->db_blkid;
961 dbuf_unoverride(dr);
962 } else {
963 /*
964 * This dbuf is not dirty in the open context.
965 * Either uncache it (if its not referenced in
966 * the open context) or reset its contents to
967 * empty.
968 */
969 dbuf_fix_old_data(db, txg);
970 }
971 }
972 /* clear the contents if its cached */
973 if (db->db_state == DB_CACHED) {
974 ASSERT(db->db.db_data != NULL);
975 arc_release(db->db_buf, db);
976 bzero(db->db.db_data, db->db.db_size);
977 arc_buf_freeze(db->db_buf);
978 }
979
980 mutex_exit(&db->db_mtx);
981 }
982 mutex_exit(&dn->dn_dbufs_mtx);
983}
984
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty)
		birth_txg = db->db_last_dirty->dr_txg;
	else if (db->db_blkptr)
		birth_txg = db->db_blkptr->blk_birth;

	/*
	 * If we don't exist or are in a snapshot, we can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dbuf_will_dirty(db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os;

	DB_GET_OBJSET(&os, db);
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
	list_link_init(&dr->dr_dirty_node);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/*
	 * Note:  This code will probably work even if there are concurrent
	 * holders, but it is untested in that scenario, as the ZPL and
	 * ztest have additional locking (the range locks) that prevents
	 * that type of concurrent access.
	 */
	ASSERT3U(refcount_count(&db->db_holds), ==, db->db_dirtycnt);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/*
	 * Any space we accounted for in dp_dirty_* will be cleaned up by
	 * dsl_pool_sync().  This is relatively rare so the discrepancy
	 * is not a big deal.
	 */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

#pragma weak dmu_buf_will_dirty = dbuf_will_dirty
void
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dbuf_fill_done(db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_buf_evict()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	int dbuf_gone = FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

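/*
 * Find the parent dbuf and block pointer for the block (level, blkid)
 * of this dnode: a spill block hangs off the dnode itself, a top-level
 * block is referenced directly from dn_phys, and anything else is
 * referenced from an indirect block, which is held and read here.
 */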
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err;
		if (dh == NULL) {
			err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
			    fail_sparse, NULL, parentp);
		} else {
			__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
			    blkid >> epbs, fail_sparse, NULL,
			    parentp, dh->dh_depth + 1);
			err = __dbuf_hold_impl(dh + 1);
		}
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
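
/*
 * Illustrative arithmetic for the epbs shifts above (a sketch, assuming
 * the common 16K indirect block size, dn_indblkshift == 14, and 128-byte
 * block pointers, SPA_BLKPTRSHIFT == 7): epbs == 7, so each indirect
 * block holds 128 block pointers.  For a level-0 block with
 * blkid == 1000, the level-1 parent is blkid 1000 >> 7 == 7, and the
 * child's slot within that parent is 1000 & 127 == 104.
 */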

static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we insert the new dbuf into the
	 * hash table *and* add it to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			list_remove(&dn->dn_dbufs, db);
			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(!list_link_active(&db->db_link));
	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}

void
dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
{
	dmu_buf_impl_t *db = NULL;
	blkptr_t *bp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (dnode_block_freed(dn, blkid))
		return;

	/* dbuf_find() returns with db_mtx held */
	if ((db = dbuf_find(dn, 0, blkid))) {
		/*
		 * This dbuf is already in the cache.  We assume that
		 * it is already CACHED, or else about to be either
		 * read or filled.
		 */
		mutex_exit(&db->db_mtx);
		return;
	}

	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
		if (bp && !BP_IS_HOLE(bp)) {
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
			    dn->dn_object, 0, blkid);

			(void) arc_read(NULL, dn->dn_objset->os_spa,
			    bp, NULL, NULL, prio,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		if (db)
			dbuf_rele(db, NULL);
	}
}
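
/*
 * Illustrative use of dbuf_prefetch() (a sketch, not a call site in this
 * file): a sequential reader might warm the next few level-0 blocks
 * while it still holds dn_struct_rwlock, as the ASSERT above requires:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (i = 1; i <= 4; i++)
 *		dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_ASYNC_READ);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The reads are issued ZIO_FLAG_SPECULATIVE, so a prefetch that fails
 * is harmless.
 */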

#define	DBUF_HOLD_IMPL_MAX_DEPTH	20

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
static int
__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
{
	ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
	dh->dh_parent = NULL;

	ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
	ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);

	*(dh->dh_dbp) = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);

	if (dh->dh_db == NULL) {
		dh->dh_bp = NULL;

		ASSERT3P(dh->dh_parent, ==, NULL);
		dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_fail_sparse, &dh->dh_parent,
		    &dh->dh_bp, dh);
		if (dh->dh_fail_sparse) {
			if (dh->dh_err == 0 &&
			    dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
				dh->dh_err = SET_ERROR(ENOENT);
			if (dh->dh_err) {
				if (dh->dh_parent)
					dbuf_rele(dh->dh_parent, NULL);
				return (dh->dh_err);
			}
		}
		if (dh->dh_err && dh->dh_err != ENOENT)
			return (dh->dh_err);
		dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_parent, dh->dh_bp);
	}

	if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
		arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
		if (dh->dh_db->db_buf->b_data == NULL) {
			dbuf_clear(dh->dh_db);
			if (dh->dh_parent) {
				dbuf_rele(dh->dh_parent, NULL);
				dh->dh_parent = NULL;
			}
			goto top;
		}
		ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
	}

	ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (dh->dh_db->db_level == 0 &&
	    dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
	    dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
	    dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
		dh->dh_dr = dh->dh_db->db_data_pending;

		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
			dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);

			dbuf_set_data(dh->dh_db,
			    arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
			    dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
			bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
			    dh->dh_db->db.db_data, dh->dh_db->db.db_size);
		}
	}

	(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
	dbuf_update_data(dh->dh_db);
	DBUF_VERIFY(dh->dh_db);
	mutex_exit(&dh->dh_db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (dh->dh_parent)
		dbuf_rele(dh->dh_parent, NULL);

	ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
	ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
	ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
	*(dh->dh_dbp) = dh->dh_db;

	return (0);
}

/*
 * The following code preserves the recursive function dbuf_hold_impl()
 * but moves the local variables AND function arguments to the heap to
 * minimize the stack frame size.  Enough space is initially allocated
 * on the heap for 20 levels of recursion.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	struct dbuf_hold_impl_data *dh;
	int error;

	dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
	__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

	error = __dbuf_hold_impl(dh);

	kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH);

	return (error);
}
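
/*
 * Sketch of the recursion-to-heap pattern (illustrative): level n of the
 * traversal uses element dh[n] of the preallocated array, so a recursive
 * "call" in dbuf_findbp() becomes
 *
 *	__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, ...);
 *	err = __dbuf_hold_impl(dh + 1);
 *
 * which keeps each stack frame small because all per-level state lives
 * in the heap-allocated dh[] array rather than in function locals.
 */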

static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth)
{
	dh->dh_dn = dn;
	dh->dh_level = level;
	dh->dh_blkid = blkid;
	dh->dh_fail_sparse = fail_sparse;
	dh->dh_tag = tag;
	dh->dh_dbp = dbp;
	dh->dh_depth = depth;
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}
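
/*
 * Illustrative hold/release pairing (a sketch; assumes the caller holds
 * dn->dn_struct_rwlock, as dbuf_hold_impl() requires):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		... use db->db.db_data under the hold ...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * The tag passed to dbuf_rele() must match the one passed to
 * dbuf_hold().
 */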

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}
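
/*
 * For example (with SPA_MINBLOCKSIZE == 512): a requested blksz of 0
 * becomes 512, a blksz of 1000 is rounded up by P2ROUNDUP() to 1024,
 * and anything above SPA_MAXBLOCKSIZE is clamped to SPA_MAXBLOCKSIZE.
 */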

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	VERIFY(refcount_add(&db->db_holds, tag) > 1);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
 */
#pragma weak dmu_buf_rele = dbuf_rele
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}

/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this barrier
			 * until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer discounted
			 * in dnode_move().  The dnode cannot move until after
			 * the dnode_rele().
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db));
			dbuf_evict(db);
		} else {
			VERIFY(!arc_buf_remove_ref(db->db_buf, db));

			/*
			 * A dbuf will be eligible for eviction if either the
			 * 'primarycache' property is set or a duplicate
			 * copy of this buffer is already cached in the arc.
			 *
			 * In the case of the 'primarycache' property, a buffer
			 * is considered for eviction if it matches the
			 * criteria set in the property.
			 *
			 * To decide if our buffer is considered a
			 * duplicate, we must call into the arc to determine
			 * if multiple buffers are referencing the same
			 * block on-disk.  If so, then we simply evict
			 * ourselves.
			 */
			if (!DBUF_IS_CACHEABLE(db) ||
			    arc_buf_eviction_needed(db->db_buf))
				dbuf_clear(db);
			else
				mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}
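
/*
 * Illustrative caller pattern (a sketch): dropping a hold while db_mtx
 * is already held, e.g. from a sync completion path:
 *
 *	mutex_enter(&db->db_mtx);
 *	... update db state under the lock ...
 *	dbuf_rele_and_unlock(db, tag);
 *
 * which releases the hold and exits db_mtx in one step, so the hold and
 * dirty counts cannot be observed in an inconsistent intermediate state.
 */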

#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}
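
/*
 * Illustrative registration of user state (a sketch; my_state_t,
 * my_state, and my_evict_cb are hypothetical names):
 *
 *	my_state_t *my_state = ...;
 *	VERIFY(dmu_buf_set_user(db, my_state, NULL, my_evict_cb) == NULL);
 *
 * A NULL return means no user pointer was previously registered and the
 * new one took effect; a non-NULL return is the pointer that was already
 * registered.  my_evict_cb() will be invoked when the dbuf is evicted.
 */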

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

/*
 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
 * is critical that we not allow the compiler to inline this function in to
 * dbuf_sync_list() thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

/*
 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
 * critical that we not allow the compiler to inline this function in to
 * dbuf_sync_list() thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}
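
/*
 * A worked example of the fill count above (illustrative): for a 16K
 * level-0 block of the meta-dnode (DMU_OT_DNODE), db_size >> DNODE_SHIFT
 * == 16384 >> 9 == 32 dnodes are scanned and only the allocated ones are
 * counted; for a 16K indirect block, db_size >> SPA_BLKPTRSHIFT
 * == 16384 >> 7 == 128 child block pointers are scanned and their
 * blk_fill values are summed.
 */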

/*
 * The SPA will call this callback several times for each zio - once
 * for every physical child i/o (zio->io_phys_children times).  This
 * allows the DMU to monitor the progress of each logical i/o.  For example,
 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
 * block.  There may be a long delay before all copies/fragments are completed,
 * so this callback allows us to retire dirty space gradually, as the physical
 * i/os complete.
 */
/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
	dmu_buf_impl_t *db = arg;
	objset_t *os = db->db_objset;
	dsl_pool_t *dp = dmu_objset_pool(os);
	dbuf_dirty_record_t *dr;
	int delta = 0;

	dr = db->db_data_pending;
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times.  Retire one
	 * portion of our dirty space each time we are called.  Any rounding
	 * error will be cleaned up by dsl_pool_sync()'s call to
	 * dsl_pool_undirty_space().
	 */
	delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
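
/*
 * For example (illustrative): an arc_write() that stores 2 copies of a
 * block has io_phys_children == 2, so with dr_accounted == 131072 each
 * of the two callbacks retires 65536 bytes of dirty space; any rounding
 * remainder is cleaned up by dsl_pool_sync(), as noted above.
 */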

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db));
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT);
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;

	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_evict);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_clear);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_update_user);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_freeable);
#endif