1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/arc.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_impl.h>
31 #include <sys/dbuf.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dmu_tx.h>
36 #include <sys/spa.h>
37 #include <sys/zio.h>
38 #include <sys/dmu_zfetch.h>
39 #include <sys/sa.h>
40 #include <sys/sa_impl.h>
41
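/*
 * Bundles the arguments and local variables of __dbuf_hold_impl() so
 * that each level of the hold recursion carries its state in one of
 * these entries (note the "dh + 1" calls further down) rather than in
 * its own set of stack locals; dh_depth records the entry's position
 * and is bounded by DBUF_HOLD_IMPL_MAX_DEPTH.
 */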
42 struct dbuf_hold_impl_data {
43 /* Function arguments */
44 dnode_t *dh_dn;
45 uint8_t dh_level;
46 uint64_t dh_blkid;
47 int dh_fail_sparse;
48 void *dh_tag;
49 dmu_buf_impl_t **dh_dbp;
50 /* Local variables */
51 dmu_buf_impl_t *dh_db;
52 dmu_buf_impl_t *dh_parent;
53 blkptr_t *dh_bp;
54 int dh_err;
55 dbuf_dirty_record_t *dh_dr;
56 arc_buf_contents_t dh_type;
57 int dh_depth;
58 };
59
60 static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
61 dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
62 void *tag, dmu_buf_impl_t **dbp, int depth);
63 static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
64
65 static void dbuf_destroy(dmu_buf_impl_t *db);
66 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
67 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
68
69 /*
70 * Global data structures and functions for the dbuf cache.
71 */
72 static kmem_cache_t *dbuf_cache;
73
74 /* ARGSUSED */
75 static int
76 dbuf_cons(void *vdb, void *unused, int kmflag)
77 {
78 dmu_buf_impl_t *db = vdb;
79 bzero(db, sizeof (dmu_buf_impl_t));
80
81 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
82 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
83 refcount_create(&db->db_holds);
84 list_link_init(&db->db_link);
85 return (0);
86 }
87
88 /* ARGSUSED */
89 static void
90 dbuf_dest(void *vdb, void *unused)
91 {
92 dmu_buf_impl_t *db = vdb;
93 mutex_destroy(&db->db_mtx);
94 cv_destroy(&db->db_changed);
95 refcount_destroy(&db->db_holds);
96 }
97
98 /*
99 * dbuf hash table routines
100 */
101 static dbuf_hash_table_t dbuf_hash_table;
102
103 static uint64_t dbuf_hash_count;
104
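/*
 * Hash an (objset, object, level, blkid) tuple into the dbuf hash
 * table by folding each component through the CRC-64 table; the final
 * xor mixes in high-order bits that the byte-wide folding does not
 * reach.
 */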
105 static uint64_t
106 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
107 {
108 uintptr_t osv = (uintptr_t)os;
109 uint64_t crc = -1ULL;
110
111 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
112 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
113 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
114 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
115 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
116 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
117 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
118
119 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
120
121 return (crc);
122 }
123
124 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
125
126 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
127 ((dbuf)->db.db_object == (obj) && \
128 (dbuf)->db_objset == (os) && \
129 (dbuf)->db_level == (level) && \
130 (dbuf)->db_blkid == (blkid))
131
132 dmu_buf_impl_t *
133 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
134 {
135 dbuf_hash_table_t *h = &dbuf_hash_table;
136 objset_t *os = dn->dn_objset;
137 uint64_t obj;
138 uint64_t hv;
139 uint64_t idx;
140 dmu_buf_impl_t *db;
141
142 obj = dn->dn_object;
143 hv = DBUF_HASH(os, obj, level, blkid);
144 idx = hv & h->hash_table_mask;
145
146 mutex_enter(DBUF_HASH_MUTEX(h, idx));
147 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
148 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
149 mutex_enter(&db->db_mtx);
150 if (db->db_state != DB_EVICTING) {
151 mutex_exit(DBUF_HASH_MUTEX(h, idx));
152 return (db);
153 }
154 mutex_exit(&db->db_mtx);
155 }
156 }
157 mutex_exit(DBUF_HASH_MUTEX(h, idx));
158 return (NULL);
159 }
160
161 /*
162 * Insert an entry into the hash table. If there is already an element
163 * equal to elem in the hash table, then the already existing element
164 * will be returned and the new element will not be inserted.
165 * Otherwise returns NULL.
166 */
167 static dmu_buf_impl_t *
168 dbuf_hash_insert(dmu_buf_impl_t *db)
169 {
170 dbuf_hash_table_t *h = &dbuf_hash_table;
171 objset_t *os = db->db_objset;
172 uint64_t obj = db->db.db_object;
173 int level = db->db_level;
174 uint64_t blkid, hv, idx;
175 dmu_buf_impl_t *dbf;
176
177 blkid = db->db_blkid;
178 hv = DBUF_HASH(os, obj, level, blkid);
179 idx = hv & h->hash_table_mask;
180
181 mutex_enter(DBUF_HASH_MUTEX(h, idx));
182 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
183 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
184 mutex_enter(&dbf->db_mtx);
185 if (dbf->db_state != DB_EVICTING) {
186 mutex_exit(DBUF_HASH_MUTEX(h, idx));
187 return (dbf);
188 }
189 mutex_exit(&dbf->db_mtx);
190 }
191 }
192
193 mutex_enter(&db->db_mtx);
194 db->db_hash_next = h->hash_table[idx];
195 h->hash_table[idx] = db;
196 mutex_exit(DBUF_HASH_MUTEX(h, idx));
197 atomic_add_64(&dbuf_hash_count, 1);
198
199 return (NULL);
200 }
201
202 /*
203 * Remove an entry from the hash table. This operation will
204 * fail if there are any existing holds on the db.
205 */
206 static void
207 dbuf_hash_remove(dmu_buf_impl_t *db)
208 {
209 dbuf_hash_table_t *h = &dbuf_hash_table;
210 uint64_t hv, idx;
211 dmu_buf_impl_t *dbf, **dbp;
212
213 hv = DBUF_HASH(db->db_objset, db->db.db_object,
214 db->db_level, db->db_blkid);
215 idx = hv & h->hash_table_mask;
216
217 /*
218 * We mustn't hold db_mtx to maintain lock ordering:
219 * DBUF_HASH_MUTEX > db_mtx.
220 */
221 ASSERT(refcount_is_zero(&db->db_holds));
222 ASSERT(db->db_state == DB_EVICTING);
223 ASSERT(!MUTEX_HELD(&db->db_mtx));
224
225 mutex_enter(DBUF_HASH_MUTEX(h, idx));
226 dbp = &h->hash_table[idx];
227 while ((dbf = *dbp) != db) {
228 dbp = &dbf->db_hash_next;
229 ASSERT(dbf != NULL);
230 }
231 *dbp = db->db_hash_next;
232 db->db_hash_next = NULL;
233 mutex_exit(DBUF_HASH_MUTEX(h, idx));
234 atomic_add_64(&dbuf_hash_count, -1);
235 }
236
237 static arc_evict_func_t dbuf_do_evict;
238
239 static void
240 dbuf_evict_user(dmu_buf_impl_t *db)
241 {
242 ASSERT(MUTEX_HELD(&db->db_mtx));
243
244 if (db->db_level != 0 || db->db_evict_func == NULL)
245 return;
246
247 if (db->db_user_data_ptr_ptr)
248 *db->db_user_data_ptr_ptr = db->db.db_data;
249 db->db_evict_func(&db->db, db->db_user_ptr);
250 db->db_user_ptr = NULL;
251 db->db_user_data_ptr_ptr = NULL;
252 db->db_evict_func = NULL;
253 }
254
255 boolean_t
256 dbuf_is_metadata(dmu_buf_impl_t *db)
257 {
258 if (db->db_level > 0) {
259 return (B_TRUE);
260 } else {
261 boolean_t is_metadata;
262
263 DB_DNODE_ENTER(db);
264 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
265 DB_DNODE_EXIT(db);
266
267 return (is_metadata);
268 }
269 }
270
271 void
272 dbuf_evict(dmu_buf_impl_t *db)
273 {
274 ASSERT(MUTEX_HELD(&db->db_mtx));
275 ASSERT(db->db_buf == NULL);
276 ASSERT(db->db_data_pending == NULL);
277
278 dbuf_clear(db);
279 dbuf_destroy(db);
280 }
281
282 void
283 dbuf_init(void)
284 {
285 uint64_t hsize = 1ULL << 16;
286 dbuf_hash_table_t *h = &dbuf_hash_table;
287 int i;
288
289 /*
290 * The hash table is big enough to fill all of physical memory
291 * with an average 4K block size. The table will take up
292 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
293 */
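	/*
	 * Illustrative arithmetic: with 8 GB of RAM the loop below stops
	 * at hsize = 2^21 buckets (2^21 * 4096 >= 2^33), i.e. a 16 MB
	 * array of 8-byte bucket heads.
	 */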
294 while (hsize * 4096 < physmem * PAGESIZE)
295 hsize <<= 1;
296
297 retry:
298 h->hash_table_mask = hsize - 1;
299 #if defined(_KERNEL) && defined(HAVE_SPL)
300 /* Large allocations which do not require contiguous pages
301 * should be using vmem_alloc() in the linux kernel */
302 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
303 #else
304 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
305 #endif
306 if (h->hash_table == NULL) {
307 /* XXX - we should really return an error instead of assert */
308 ASSERT(hsize > (1ULL << 10));
309 hsize >>= 1;
310 goto retry;
311 }
312
313 dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
314 sizeof (dmu_buf_impl_t),
315 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
316
317 for (i = 0; i < DBUF_MUTEXES; i++)
318 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
319 }
320
321 void
322 dbuf_fini(void)
323 {
324 dbuf_hash_table_t *h = &dbuf_hash_table;
325 int i;
326
327 for (i = 0; i < DBUF_MUTEXES; i++)
328 mutex_destroy(&h->hash_mutexes[i]);
329 #if defined(_KERNEL) && defined(HAVE_SPL)
330 /* Large allocations which do not require contiguous pages
331 * should be using vmem_free() in the linux kernel */
332 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
333 #else
334 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
335 #endif
336 kmem_cache_destroy(dbuf_cache);
337 }
338
339 /*
340 * Other stuff.
341 */
342
343 #ifdef ZFS_DEBUG
344 static void
345 dbuf_verify(dmu_buf_impl_t *db)
346 {
347 dnode_t *dn;
348 dbuf_dirty_record_t *dr;
349
350 ASSERT(MUTEX_HELD(&db->db_mtx));
351
352 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
353 return;
354
355 ASSERT(db->db_objset != NULL);
356 DB_DNODE_ENTER(db);
357 dn = DB_DNODE(db);
358 if (dn == NULL) {
359 ASSERT(db->db_parent == NULL);
360 ASSERT(db->db_blkptr == NULL);
361 } else {
362 ASSERT3U(db->db.db_object, ==, dn->dn_object);
363 ASSERT3P(db->db_objset, ==, dn->dn_objset);
364 ASSERT3U(db->db_level, <, dn->dn_nlevels);
365 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
366 db->db_blkid == DMU_SPILL_BLKID ||
367 !list_is_empty(&dn->dn_dbufs));
368 }
369 if (db->db_blkid == DMU_BONUS_BLKID) {
370 ASSERT(dn != NULL);
371 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
372 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
373 } else if (db->db_blkid == DMU_SPILL_BLKID) {
374 ASSERT(dn != NULL);
375 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
376 ASSERT0(db->db.db_offset);
377 } else {
378 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
379 }
380
381 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
382 ASSERT(dr->dr_dbuf == db);
383
384 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
385 ASSERT(dr->dr_dbuf == db);
386
387 /*
388 * We can't assert that db_size matches dn_datablksz because it
389 * can be momentarily different when another thread is doing
390 * dnode_set_blksz().
391 */
392 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
393 dr = db->db_data_pending;
394 /*
395 * It should only be modified in syncing context, so
396 * make sure we only have one copy of the data.
397 */
398 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
399 }
400
401 /* verify db->db_blkptr */
402 if (db->db_blkptr) {
403 if (db->db_parent == dn->dn_dbuf) {
404 /* db is pointed to by the dnode */
405 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
406 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
407 ASSERT(db->db_parent == NULL);
408 else
409 ASSERT(db->db_parent != NULL);
410 if (db->db_blkid != DMU_SPILL_BLKID)
411 ASSERT3P(db->db_blkptr, ==,
412 &dn->dn_phys->dn_blkptr[db->db_blkid]);
413 } else {
414 /* db is pointed to by an indirect block */
415 ASSERTV(int epb = db->db_parent->db.db_size >>
416 SPA_BLKPTRSHIFT);
417 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
418 ASSERT3U(db->db_parent->db.db_object, ==,
419 db->db.db_object);
420 /*
421 * dnode_grow_indblksz() can make this fail if we don't
422 * have the struct_rwlock. XXX indblksz no longer
423 * grows. safe to do this now?
424 */
425 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
426 ASSERT3P(db->db_blkptr, ==,
427 ((blkptr_t *)db->db_parent->db.db_data +
428 db->db_blkid % epb));
429 }
430 }
431 }
432 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
433 (db->db_buf == NULL || db->db_buf->b_data) &&
434 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
435 db->db_state != DB_FILL && !dn->dn_free_txg) {
436 /*
437 * If the blkptr isn't set but the buffer has nonzero data,
438 * it had better be dirty, otherwise we'll lose that
439 * data when we evict this buffer.
440 */
441 if (db->db_dirtycnt == 0) {
442 ASSERTV(uint64_t *buf = db->db.db_data);
443 int i;
444
445 for (i = 0; i < db->db.db_size >> 3; i++) {
446 ASSERT(buf[i] == 0);
447 }
448 }
449 }
450 DB_DNODE_EXIT(db);
451 }
452 #endif
453
454 static void
455 dbuf_update_data(dmu_buf_impl_t *db)
456 {
457 ASSERT(MUTEX_HELD(&db->db_mtx));
458 if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
459 ASSERT(!refcount_is_zero(&db->db_holds));
460 *db->db_user_data_ptr_ptr = db->db.db_data;
461 }
462 }
463
464 static void
465 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
466 {
467 ASSERT(MUTEX_HELD(&db->db_mtx));
468 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
469 db->db_buf = buf;
470 if (buf != NULL) {
471 ASSERT(buf->b_data != NULL);
472 db->db.db_data = buf->b_data;
473 if (!arc_released(buf))
474 arc_set_callback(buf, dbuf_do_evict, db);
475 dbuf_update_data(db);
476 } else {
477 dbuf_evict_user(db);
478 db->db.db_data = NULL;
479 if (db->db_state != DB_NOFILL)
480 db->db_state = DB_UNCACHED;
481 }
482 }
483
484 /*
485 * Loan out an arc_buf for read. Return the loaned arc_buf.
486 */
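/*
 * If the dbuf's buf has already been released, or the dbuf has more
 * than one hold, the data is copied into a freshly loaned buf;
 * otherwise the dbuf's own arc buf is loaned out in place and detached
 * from the dbuf via dbuf_set_data(db, NULL).
 */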
487 arc_buf_t *
488 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
489 {
490 arc_buf_t *abuf;
491
492 mutex_enter(&db->db_mtx);
493 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
494 int blksz = db->db.db_size;
495 spa_t *spa;
496
497 mutex_exit(&db->db_mtx);
498 DB_GET_SPA(&spa, db);
499 abuf = arc_loan_buf(spa, blksz);
500 bcopy(db->db.db_data, abuf->b_data, blksz);
501 } else {
502 abuf = db->db_buf;
503 arc_loan_inuse_buf(abuf, db);
504 dbuf_set_data(db, NULL);
505 mutex_exit(&db->db_mtx);
506 }
507 return (abuf);
508 }
509
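/*
 * Map a byte offset within an object to its level-0 block id. For
 * example, with 128K blocks (dn_datablkshift == 17) offset 300000
 * falls in block 2; an object still stored in a single (possibly
 * odd-sized) block has no datablkshift and always maps to block 0.
 */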
510 uint64_t
511 dbuf_whichblock(dnode_t *dn, uint64_t offset)
512 {
513 if (dn->dn_datablkshift) {
514 return (offset >> dn->dn_datablkshift);
515 } else {
516 ASSERT3U(offset, <, dn->dn_datablksz);
517 return (0);
518 }
519 }
520
521 static void
522 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
523 {
524 dmu_buf_impl_t *db = vdb;
525
526 mutex_enter(&db->db_mtx);
527 ASSERT3U(db->db_state, ==, DB_READ);
528 /*
529 * All reads are synchronous, so we must have a hold on the dbuf
530 */
531 ASSERT(refcount_count(&db->db_holds) > 0);
532 ASSERT(db->db_buf == NULL);
533 ASSERT(db->db.db_data == NULL);
534 if (db->db_level == 0 && db->db_freed_in_flight) {
535 /* we were freed in flight; disregard any error */
536 arc_release(buf, db);
537 bzero(buf->b_data, db->db.db_size);
538 arc_buf_freeze(buf);
539 db->db_freed_in_flight = FALSE;
540 dbuf_set_data(db, buf);
541 db->db_state = DB_CACHED;
542 } else if (zio == NULL || zio->io_error == 0) {
543 dbuf_set_data(db, buf);
544 db->db_state = DB_CACHED;
545 } else {
546 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
547 ASSERT3P(db->db_buf, ==, NULL);
548 VERIFY(arc_buf_remove_ref(buf, db) == 1);
549 db->db_state = DB_UNCACHED;
550 }
551 cv_broadcast(&db->db_changed);
552 dbuf_rele_and_unlock(db, NULL);
553 }
554
555 static void
556 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
557 {
558 dnode_t *dn;
559 spa_t *spa;
560 zbookmark_t zb;
561 uint32_t aflags = ARC_NOWAIT;
562 arc_buf_t *pbuf;
563
564 DB_DNODE_ENTER(db);
565 dn = DB_DNODE(db);
566 ASSERT(!refcount_is_zero(&db->db_holds));
567 /* We need the struct_rwlock to prevent db_blkptr from changing. */
568 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
569 ASSERT(MUTEX_HELD(&db->db_mtx));
570 ASSERT(db->db_state == DB_UNCACHED);
571 ASSERT(db->db_buf == NULL);
572
573 if (db->db_blkid == DMU_BONUS_BLKID) {
574 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
575
576 ASSERT3U(bonuslen, <=, db->db.db_size);
577 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
578 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
579 if (bonuslen < DN_MAX_BONUSLEN)
580 bzero(db->db.db_data, DN_MAX_BONUSLEN);
581 if (bonuslen)
582 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
583 DB_DNODE_EXIT(db);
584 dbuf_update_data(db);
585 db->db_state = DB_CACHED;
586 mutex_exit(&db->db_mtx);
587 return;
588 }
589
590 /*
591 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
592 * processes the delete record and clears the bp while we are waiting
593 * for the dn_mtx (resulting in a "no" from block_freed).
594 */
595 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
596 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
597 BP_IS_HOLE(db->db_blkptr)))) {
598 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
599
600 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
601 db->db.db_size, db, type));
602 DB_DNODE_EXIT(db);
603 bzero(db->db.db_data, db->db.db_size);
604 db->db_state = DB_CACHED;
605 *flags |= DB_RF_CACHED;
606 mutex_exit(&db->db_mtx);
607 return;
608 }
609
610 spa = dn->dn_objset->os_spa;
611 DB_DNODE_EXIT(db);
612
613 db->db_state = DB_READ;
614 mutex_exit(&db->db_mtx);
615
616 if (DBUF_IS_L2CACHEABLE(db))
617 aflags |= ARC_L2CACHE;
618
619 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
620 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
621 db->db.db_object, db->db_level, db->db_blkid);
622
623 dbuf_add_ref(db, NULL);
624 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
625
626 if (db->db_parent)
627 pbuf = db->db_parent->db_buf;
628 else
629 pbuf = db->db_objset->os_phys_buf;
630
631 (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
632 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
633 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
634 &aflags, &zb);
635 if (aflags & ARC_CACHED)
636 *flags |= DB_RF_CACHED;
637 }
638
639 int
640 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
641 {
642 int err = 0;
643 int havepzio = (zio != NULL);
644 int prefetch;
645 dnode_t *dn;
646
647 /*
648 * We don't have to hold the mutex to check db_state because it
649 * can't be freed while we have a hold on the buffer.
650 */
651 ASSERT(!refcount_is_zero(&db->db_holds));
652
653 if (db->db_state == DB_NOFILL)
654 return (EIO);
655
656 DB_DNODE_ENTER(db);
657 dn = DB_DNODE(db);
658 if ((flags & DB_RF_HAVESTRUCT) == 0)
659 rw_enter(&dn->dn_struct_rwlock, RW_READER);
660
661 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
662 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
663 DBUF_IS_CACHEABLE(db);
664
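	/*
	 * Three cases follow: the dbuf is already DB_CACHED (just kick
	 * off any prefetch), it is DB_UNCACHED (issue the read via
	 * dbuf_read_impl()), or a read/fill is already in flight (wait
	 * for it unless DB_RF_NEVERWAIT was passed).
	 */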
665 mutex_enter(&db->db_mtx);
666 if (db->db_state == DB_CACHED) {
667 mutex_exit(&db->db_mtx);
668 if (prefetch)
669 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
670 db->db.db_size, TRUE);
671 if ((flags & DB_RF_HAVESTRUCT) == 0)
672 rw_exit(&dn->dn_struct_rwlock);
673 DB_DNODE_EXIT(db);
674 } else if (db->db_state == DB_UNCACHED) {
675 spa_t *spa = dn->dn_objset->os_spa;
676
677 if (zio == NULL)
678 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
679 dbuf_read_impl(db, zio, &flags);
680
681 /* dbuf_read_impl has dropped db_mtx for us */
682
683 if (prefetch)
684 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
685 db->db.db_size, flags & DB_RF_CACHED);
686
687 if ((flags & DB_RF_HAVESTRUCT) == 0)
688 rw_exit(&dn->dn_struct_rwlock);
689 DB_DNODE_EXIT(db);
690
691 if (!havepzio)
692 err = zio_wait(zio);
693 } else {
694 mutex_exit(&db->db_mtx);
695 if (prefetch)
696 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
697 db->db.db_size, TRUE);
698 if ((flags & DB_RF_HAVESTRUCT) == 0)
699 rw_exit(&dn->dn_struct_rwlock);
700 DB_DNODE_EXIT(db);
701
702 mutex_enter(&db->db_mtx);
703 if ((flags & DB_RF_NEVERWAIT) == 0) {
704 while (db->db_state == DB_READ ||
705 db->db_state == DB_FILL) {
706 ASSERT(db->db_state == DB_READ ||
707 (flags & DB_RF_HAVESTRUCT) == 0);
708 cv_wait(&db->db_changed, &db->db_mtx);
709 }
710 if (db->db_state == DB_UNCACHED)
711 err = EIO;
712 }
713 mutex_exit(&db->db_mtx);
714 }
715
716 ASSERT(err || havepzio || db->db_state == DB_CACHED);
717 return (err);
718 }
719
720 static void
721 dbuf_noread(dmu_buf_impl_t *db)
722 {
723 ASSERT(!refcount_is_zero(&db->db_holds));
724 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
725 mutex_enter(&db->db_mtx);
726 while (db->db_state == DB_READ || db->db_state == DB_FILL)
727 cv_wait(&db->db_changed, &db->db_mtx);
728 if (db->db_state == DB_UNCACHED) {
729 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
730 spa_t *spa;
731
732 ASSERT(db->db_buf == NULL);
733 ASSERT(db->db.db_data == NULL);
734 DB_GET_SPA(&spa, db);
735 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
736 db->db_state = DB_FILL;
737 } else if (db->db_state == DB_NOFILL) {
738 dbuf_set_data(db, NULL);
739 } else {
740 ASSERT3U(db->db_state, ==, DB_CACHED);
741 }
742 mutex_exit(&db->db_mtx);
743 }
744
745 /*
746 * This is our just-in-time copy function. It makes a copy of
747 * buffers that have been modified in a previous transaction
748 * group, before we modify them in the current active group.
749 *
750 * This function is used in two places: when we are dirtying a
751 * buffer for the first time in a txg, and when we are freeing
752 * a range in a dnode that includes this buffer.
753 *
754 * Note that when we are called from dbuf_free_range() we do
755 * not put a hold on the buffer, we just traverse the active
756 * dbuf list for the dnode.
757 */
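/*
 * The copy takes one of three forms: the bonus buffer gets a fresh
 * zio_buf, a regular buffer with holders beyond its dirty records gets
 * a new arc buf, and otherwise the dbuf's data pointer is simply
 * detached (dbuf_set_data(db, NULL)) since nothing else references it.
 */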
758 static void
759 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
760 {
761 dbuf_dirty_record_t *dr = db->db_last_dirty;
762
763 ASSERT(MUTEX_HELD(&db->db_mtx));
764 ASSERT(db->db.db_data != NULL);
765 ASSERT(db->db_level == 0);
766 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
767
768 if (dr == NULL ||
769 (dr->dt.dl.dr_data !=
770 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
771 return;
772
773 /*
774 * If the last dirty record for this dbuf has not yet synced
775 * and it's referencing the dbuf data, either:
776 * reset the reference to point to a new copy,
777 * or (if there are no active holders)
778 * just null out the current db_data pointer.
779 */
780 ASSERT(dr->dr_txg >= txg - 2);
781 if (db->db_blkid == DMU_BONUS_BLKID) {
782 /* Note that the data bufs here are zio_bufs */
783 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
784 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
785 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
786 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
787 int size = db->db.db_size;
788 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
789 spa_t *spa;
790
791 DB_GET_SPA(&spa, db);
792 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
793 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
794 } else {
795 dbuf_set_data(db, NULL);
796 }
797 }
798
799 void
800 dbuf_unoverride(dbuf_dirty_record_t *dr)
801 {
802 dmu_buf_impl_t *db = dr->dr_dbuf;
803 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
804 uint64_t txg = dr->dr_txg;
805
806 ASSERT(MUTEX_HELD(&db->db_mtx));
807 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
808 ASSERT(db->db_level == 0);
809
810 if (db->db_blkid == DMU_BONUS_BLKID ||
811 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
812 return;
813
814 ASSERT(db->db_data_pending != dr);
815
816 /* free this block */
817 if (!BP_IS_HOLE(bp)) {
818 spa_t *spa;
819
820 DB_GET_SPA(&spa, db);
821 zio_free(spa, txg, bp);
822 }
823 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
824 /*
825 * Release the already-written buffer, so we leave it in
826 * a consistent dirty state. Note that all callers are
827 * modifying the buffer, so they will immediately do
828 * another (redundant) arc_release(). Therefore, leave
829 * the buf thawed to save the effort of freezing &
830 * immediately re-thawing it.
831 */
832 arc_release(dr->dt.dl.dr_data, db);
833 }
834
835 /*
836 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
837 * data blocks in the free range, so that any future readers will find
838 * empty blocks. Also, if we happen across any level-1 dbufs in the
839 * range that have not already been marked dirty, mark them dirty so
840 * they stay in memory.
841 */
842 void
843 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
844 {
845 dmu_buf_impl_t *db, *db_next;
846 uint64_t txg = tx->tx_txg;
847 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
848 uint64_t first_l1 = start >> epbs;
849 uint64_t last_l1 = end >> epbs;
850
851 if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
852 end = dn->dn_maxblkid;
853 last_l1 = end >> epbs;
854 }
855 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
856 mutex_enter(&dn->dn_dbufs_mtx);
857 for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
858 db_next = list_next(&dn->dn_dbufs, db);
859 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
860
861 if (db->db_level == 1 &&
862 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
863 mutex_enter(&db->db_mtx);
864 if (db->db_last_dirty &&
865 db->db_last_dirty->dr_txg < txg) {
866 dbuf_add_ref(db, FTAG);
867 mutex_exit(&db->db_mtx);
868 dbuf_will_dirty(db, tx);
869 dbuf_rele(db, FTAG);
870 } else {
871 mutex_exit(&db->db_mtx);
872 }
873 }
874
875 if (db->db_level != 0)
876 continue;
877 dprintf_dbuf(db, "found buf %s\n", "");
878 if (db->db_blkid < start || db->db_blkid > end)
879 continue;
880
881 /* found a level 0 buffer in the range */
882 if (dbuf_undirty(db, tx))
883 continue;
884
885 mutex_enter(&db->db_mtx);
886 if (db->db_state == DB_UNCACHED ||
887 db->db_state == DB_NOFILL ||
888 db->db_state == DB_EVICTING) {
889 ASSERT(db->db.db_data == NULL);
890 mutex_exit(&db->db_mtx);
891 continue;
892 }
893 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
894 /* will be handled in dbuf_read_done or dbuf_rele */
895 db->db_freed_in_flight = TRUE;
896 mutex_exit(&db->db_mtx);
897 continue;
898 }
899 if (refcount_count(&db->db_holds) == 0) {
900 ASSERT(db->db_buf);
901 dbuf_clear(db);
902 continue;
903 }
904 /* The dbuf is referenced */
905
906 if (db->db_last_dirty != NULL) {
907 dbuf_dirty_record_t *dr = db->db_last_dirty;
908
909 if (dr->dr_txg == txg) {
910 /*
911 * This buffer is "in-use"; re-adjust the file
912 * size to reflect that this buffer may
913 * contain new data when we sync.
914 */
915 if (db->db_blkid != DMU_SPILL_BLKID &&
916 db->db_blkid > dn->dn_maxblkid)
917 dn->dn_maxblkid = db->db_blkid;
918 dbuf_unoverride(dr);
919 } else {
920 /*
921 * This dbuf is not dirty in the open context.
922 * Either uncache it (if it's not referenced in
923 * the open context) or reset its contents to
924 * empty.
925 */
926 dbuf_fix_old_data(db, txg);
927 }
928 }
929 /* clear the contents if it's cached */
930 if (db->db_state == DB_CACHED) {
931 ASSERT(db->db.db_data != NULL);
932 arc_release(db->db_buf, db);
933 bzero(db->db.db_data, db->db.db_size);
934 arc_buf_freeze(db->db_buf);
935 }
936
937 mutex_exit(&db->db_mtx);
938 }
939 mutex_exit(&dn->dn_dbufs_mtx);
940 }
941
942 static int
943 dbuf_block_freeable(dmu_buf_impl_t *db)
944 {
945 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
946 uint64_t birth_txg = 0;
947
948 /*
949 * We don't need any locking to protect db_blkptr:
950 * If it's syncing, then db_last_dirty will be set
951 * so we'll ignore db_blkptr.
952 */
953 ASSERT(MUTEX_HELD(&db->db_mtx));
954 if (db->db_last_dirty)
955 birth_txg = db->db_last_dirty->dr_txg;
956 else if (db->db_blkptr)
957 birth_txg = db->db_blkptr->blk_birth;
958
959 /*
960 * If we don't exist or are in a snapshot, we can't be freed.
961 * Don't pass the bp to dsl_dataset_block_freeable() since we
962 * are holding the db_mtx lock and might deadlock if we are
963 * prefetching a dedup-ed block.
964 */
965 if (birth_txg)
966 return (ds == NULL ||
967 dsl_dataset_block_freeable(ds, NULL, birth_txg));
968 else
969 return (FALSE);
970 }
971
972 void
973 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
974 {
975 arc_buf_t *buf, *obuf;
976 int osize = db->db.db_size;
977 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
978 dnode_t *dn;
979
980 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
981
982 DB_DNODE_ENTER(db);
983 dn = DB_DNODE(db);
984
985 /* XXX does *this* func really need the lock? */
986 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
987
988 /*
989 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
990 * is OK, because there can be no other references to the db
991 * when we are changing its size, so no concurrent DB_FILL can
992 * be happening.
993 */
994 /*
995 * XXX we should be doing a dbuf_read, checking the return
996 * value and returning that up to our callers
997 */
998 dbuf_will_dirty(db, tx);
999
1000 /* create the data buffer for the new block */
1001 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
1002
1003 /* copy old block data to the new block */
1004 obuf = db->db_buf;
1005 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1006 /* zero the remainder */
1007 if (size > osize)
1008 bzero((uint8_t *)buf->b_data + osize, size - osize);
1009
1010 mutex_enter(&db->db_mtx);
1011 dbuf_set_data(db, buf);
1012 VERIFY(arc_buf_remove_ref(obuf, db) == 1);
1013 db->db.db_size = size;
1014
1015 if (db->db_level == 0) {
1016 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1017 db->db_last_dirty->dt.dl.dr_data = buf;
1018 }
1019 mutex_exit(&db->db_mtx);
1020
1021 dnode_willuse_space(dn, size-osize, tx);
1022 DB_DNODE_EXIT(db);
1023 }
1024
1025 void
1026 dbuf_release_bp(dmu_buf_impl_t *db)
1027 {
1028 objset_t *os;
1029 zbookmark_t zb;
1030
1031 DB_GET_OBJSET(&os, db);
1032 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1033 ASSERT(arc_released(os->os_phys_buf) ||
1034 list_link_active(&os->os_dsl_dataset->ds_synced_link));
1035 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1036
1037 zb.zb_objset = os->os_dsl_dataset ?
1038 os->os_dsl_dataset->ds_object : 0;
1039 zb.zb_object = db->db.db_object;
1040 zb.zb_level = db->db_level;
1041 zb.zb_blkid = db->db_blkid;
1042 (void) arc_release_bp(db->db_buf, db,
1043 db->db_blkptr, os->os_spa, &zb);
1044 }
1045
1046 dbuf_dirty_record_t *
1047 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1048 {
1049 dnode_t *dn;
1050 objset_t *os;
1051 dbuf_dirty_record_t **drp, *dr;
1052 int drop_struct_lock = FALSE;
1053 boolean_t do_free_accounting = B_FALSE;
1054 int txgoff = tx->tx_txg & TXG_MASK;
1055
1056 ASSERT(tx->tx_txg != 0);
1057 ASSERT(!refcount_is_zero(&db->db_holds));
1058 DMU_TX_DIRTY_BUF(tx, db);
1059
1060 DB_DNODE_ENTER(db);
1061 dn = DB_DNODE(db);
1062 /*
1063 * Shouldn't dirty a regular buffer in syncing context. Private
1064 * objects may be dirtied in syncing context, but only if they
1065 * were already pre-dirtied in open context.
1066 */
1067 ASSERT(!dmu_tx_is_syncing(tx) ||
1068 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1069 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1070 dn->dn_objset->os_dsl_dataset == NULL);
1071 /*
1072 * We make this assert for private objects as well, but after we
1073 * check if we're already dirty. They are allowed to re-dirty
1074 * in syncing context.
1075 */
1076 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1077 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1078 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1079
1080 mutex_enter(&db->db_mtx);
1081 /*
1082 * XXX make this true for indirects too? The problem is that
1083 * transactions created with dmu_tx_create_assigned() from
1084 * syncing context don't bother holding ahead.
1085 */
1086 ASSERT(db->db_level != 0 ||
1087 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1088 db->db_state == DB_NOFILL);
1089
1090 mutex_enter(&dn->dn_mtx);
1091 /*
1092 * Don't set dirtyctx to SYNC if we're just modifying this as we
1093 * initialize the objset.
1094 */
1095 if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1096 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1097 dn->dn_dirtyctx =
1098 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1099 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1100 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
1101 }
1102 mutex_exit(&dn->dn_mtx);
1103
1104 if (db->db_blkid == DMU_SPILL_BLKID)
1105 dn->dn_have_spill = B_TRUE;
1106
1107 /*
1108 * If this buffer is already dirty, we're done.
1109 */
1110 drp = &db->db_last_dirty;
1111 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1112 db->db.db_object == DMU_META_DNODE_OBJECT);
1113 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1114 drp = &dr->dr_next;
1115 if (dr && dr->dr_txg == tx->tx_txg) {
1116 DB_DNODE_EXIT(db);
1117
1118 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1119 /*
1120 * If this buffer has already been written out,
1121 * we now need to reset its state.
1122 */
1123 dbuf_unoverride(dr);
1124 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1125 db->db_state != DB_NOFILL)
1126 arc_buf_thaw(db->db_buf);
1127 }
1128 mutex_exit(&db->db_mtx);
1129 return (dr);
1130 }
1131
1132 /*
1133 * Only valid if not already dirty.
1134 */
1135 ASSERT(dn->dn_object == 0 ||
1136 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1137 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1138
1139 ASSERT3U(dn->dn_nlevels, >, db->db_level);
1140 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1141 dn->dn_phys->dn_nlevels > db->db_level ||
1142 dn->dn_next_nlevels[txgoff] > db->db_level ||
1143 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1144 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1145
1146 /*
1147 * We should only be dirtying in syncing context if it's the
1148 * mos or we're initializing the os or it's a special object.
1149 * However, we are allowed to dirty in syncing context provided
1150 * we already dirtied it in open context. Hence we must make
1151 * this assertion only if we're not already dirty.
1152 */
1153 os = dn->dn_objset;
1154 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1155 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1156 ASSERT(db->db.db_size != 0);
1157
1158 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1159
1160 if (db->db_blkid != DMU_BONUS_BLKID) {
1161 /*
1162 * Update the accounting.
1163 * Note: we delay "free accounting" until after we drop
1164 * the db_mtx. This keeps us from grabbing other locks
1165 * (and possibly deadlocking) in bp_get_dsize() while
1166 * also holding the db_mtx.
1167 */
1168 dnode_willuse_space(dn, db->db.db_size, tx);
1169 do_free_accounting = dbuf_block_freeable(db);
1170 }
1171
1172 /*
1173 * If this buffer is dirty in an old transaction group we need
1174 * to make a copy of it so that the changes we make in this
1175 * transaction group won't leak out when we sync the older txg.
1176 */
1177 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
1178 list_link_init(&dr->dr_dirty_node);
1179 if (db->db_level == 0) {
1180 void *data_old = db->db_buf;
1181
1182 if (db->db_state != DB_NOFILL) {
1183 if (db->db_blkid == DMU_BONUS_BLKID) {
1184 dbuf_fix_old_data(db, tx->tx_txg);
1185 data_old = db->db.db_data;
1186 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1187 /*
1188 * Release the data buffer from the cache so
1189 * that we can modify it without impacting
1190 * possible other users of this cached data
1191 * block. Note that indirect blocks and
1192 * private objects are not released until the
1193 * syncing state (since they are only modified
1194 * then).
1195 */
1196 arc_release(db->db_buf, db);
1197 dbuf_fix_old_data(db, tx->tx_txg);
1198 data_old = db->db_buf;
1199 }
1200 ASSERT(data_old != NULL);
1201 }
1202 dr->dt.dl.dr_data = data_old;
1203 } else {
1204 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1205 list_create(&dr->dt.di.dr_children,
1206 sizeof (dbuf_dirty_record_t),
1207 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1208 }
1209 dr->dr_dbuf = db;
1210 dr->dr_txg = tx->tx_txg;
1211 dr->dr_next = *drp;
1212 *drp = dr;
1213
1214 /*
1215 * We could have been freed_in_flight between the dbuf_noread
1216 * and dbuf_dirty. We win, as though the dbuf_noread() had
1217 * happened after the free.
1218 */
1219 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1220 db->db_blkid != DMU_SPILL_BLKID) {
1221 mutex_enter(&dn->dn_mtx);
1222 dnode_clear_range(dn, db->db_blkid, 1, tx);
1223 mutex_exit(&dn->dn_mtx);
1224 db->db_freed_in_flight = FALSE;
1225 }
1226
1227 /*
1228 * This buffer is now part of this txg
1229 */
1230 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1231 db->db_dirtycnt += 1;
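	/*
	 * A dbuf can be dirty in at most three txgs at once (roughly
	 * the open, quiescing and syncing txgs), hence the bound
	 * asserted below.
	 */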
1232 ASSERT3U(db->db_dirtycnt, <=, 3);
1233
1234 mutex_exit(&db->db_mtx);
1235
1236 if (db->db_blkid == DMU_BONUS_BLKID ||
1237 db->db_blkid == DMU_SPILL_BLKID) {
1238 mutex_enter(&dn->dn_mtx);
1239 ASSERT(!list_link_active(&dr->dr_dirty_node));
1240 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1241 mutex_exit(&dn->dn_mtx);
1242 dnode_setdirty(dn, tx);
1243 DB_DNODE_EXIT(db);
1244 return (dr);
1245 } else if (do_free_accounting) {
1246 blkptr_t *bp = db->db_blkptr;
1247 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1248 bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1249 /*
1250 * This is only a guess -- if the dbuf is dirty
1251 * in a previous txg, we don't know how much
1252 * space it will use on disk yet. We should
1253 * really have the struct_rwlock to access
1254 * db_blkptr, but since this is just a guess,
1255 * it's OK if we get an odd answer.
1256 */
1257 ddt_prefetch(os->os_spa, bp);
1258 dnode_willuse_space(dn, -willfree, tx);
1259 }
1260
1261 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1262 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1263 drop_struct_lock = TRUE;
1264 }
1265
1266 if (db->db_level == 0) {
1267 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1268 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1269 }
1270
1271 if (db->db_level+1 < dn->dn_nlevels) {
1272 dmu_buf_impl_t *parent = db->db_parent;
1273 dbuf_dirty_record_t *di;
1274 int parent_held = FALSE;
1275
1276 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1277 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1278
1279 parent = dbuf_hold_level(dn, db->db_level+1,
1280 db->db_blkid >> epbs, FTAG);
1281 ASSERT(parent != NULL);
1282 parent_held = TRUE;
1283 }
1284 if (drop_struct_lock)
1285 rw_exit(&dn->dn_struct_rwlock);
1286 ASSERT3U(db->db_level+1, ==, parent->db_level);
1287 di = dbuf_dirty(parent, tx);
1288 if (parent_held)
1289 dbuf_rele(parent, FTAG);
1290
1291 mutex_enter(&db->db_mtx);
1292 /* possible race with dbuf_undirty() */
1293 if (db->db_last_dirty == dr ||
1294 dn->dn_object == DMU_META_DNODE_OBJECT) {
1295 mutex_enter(&di->dt.di.dr_mtx);
1296 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1297 ASSERT(!list_link_active(&dr->dr_dirty_node));
1298 list_insert_tail(&di->dt.di.dr_children, dr);
1299 mutex_exit(&di->dt.di.dr_mtx);
1300 dr->dr_parent = di;
1301 }
1302 mutex_exit(&db->db_mtx);
1303 } else {
1304 ASSERT(db->db_level+1 == dn->dn_nlevels);
1305 ASSERT(db->db_blkid < dn->dn_nblkptr);
1306 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1307 mutex_enter(&dn->dn_mtx);
1308 ASSERT(!list_link_active(&dr->dr_dirty_node));
1309 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1310 mutex_exit(&dn->dn_mtx);
1311 if (drop_struct_lock)
1312 rw_exit(&dn->dn_struct_rwlock);
1313 }
1314
1315 dnode_setdirty(dn, tx);
1316 DB_DNODE_EXIT(db);
1317 return (dr);
1318 }
1319
1320 static int
1321 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1322 {
1323 dnode_t *dn;
1324 uint64_t txg = tx->tx_txg;
1325 dbuf_dirty_record_t *dr, **drp;
1326
1327 ASSERT(txg != 0);
1328 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1329
1330 mutex_enter(&db->db_mtx);
1331 /*
1332 * If this buffer is not dirty, we're done.
1333 */
1334 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1335 if (dr->dr_txg <= txg)
1336 break;
1337 if (dr == NULL || dr->dr_txg < txg) {
1338 mutex_exit(&db->db_mtx);
1339 return (0);
1340 }
1341 ASSERT(dr->dr_txg == txg);
1342 ASSERT(dr->dr_dbuf == db);
1343
1344 DB_DNODE_ENTER(db);
1345 dn = DB_DNODE(db);
1346
1347 /*
1348 * If this buffer is currently held, we cannot undirty
1349 * it, since one of the current holders may be in the
1350 * middle of an update. Note that users of dbuf_undirty()
1351 * should not place a hold on the dbuf before the call.
1352 * Also note: we can get here with a spill block, so
1353 * test for that similarly to how dbuf_dirty() does.
1354 */
1355 if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1356 mutex_exit(&db->db_mtx);
1357 /* Make sure we don't toss this buffer at sync phase */
1358 if (db->db_blkid != DMU_SPILL_BLKID) {
1359 mutex_enter(&dn->dn_mtx);
1360 dnode_clear_range(dn, db->db_blkid, 1, tx);
1361 mutex_exit(&dn->dn_mtx);
1362 }
1363 DB_DNODE_EXIT(db);
1364 return (0);
1365 }
1366
1367 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1368
1369 ASSERT(db->db.db_size != 0);
1370
1371 /* XXX would be nice to fix up dn_towrite_space[] */
1372
1373 *drp = dr->dr_next;
1374
1375 /*
1376 * Note that there are three places in dbuf_dirty()
1377 * where this dirty record may be put on a list.
1378 * Make sure to do a list_remove corresponding to
1379 * every one of those list_insert calls.
1380 */
1381 if (dr->dr_parent) {
1382 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1383 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1384 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1385 } else if (db->db_blkid == DMU_SPILL_BLKID ||
1386 db->db_level+1 == dn->dn_nlevels) {
1387 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1388 mutex_enter(&dn->dn_mtx);
1389 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1390 mutex_exit(&dn->dn_mtx);
1391 }
1392 DB_DNODE_EXIT(db);
1393
1394 if (db->db_level == 0) {
1395 if (db->db_state != DB_NOFILL) {
1396 dbuf_unoverride(dr);
1397
1398 ASSERT(db->db_buf != NULL);
1399 ASSERT(dr->dt.dl.dr_data != NULL);
1400 if (dr->dt.dl.dr_data != db->db_buf)
1401 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
1402 db) == 1);
1403 }
1404 } else {
1405 ASSERT(db->db_buf != NULL);
1406 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1407 mutex_destroy(&dr->dt.di.dr_mtx);
1408 list_destroy(&dr->dt.di.dr_children);
1409 }
1410 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1411
1412 ASSERT(db->db_dirtycnt > 0);
1413 db->db_dirtycnt -= 1;
1414
1415 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1416 arc_buf_t *buf = db->db_buf;
1417
1418 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1419 dbuf_set_data(db, NULL);
1420 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1421 dbuf_evict(db);
1422 return (1);
1423 }
1424
1425 mutex_exit(&db->db_mtx);
1426 return (0);
1427 }
1428
1429 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1430 void
1431 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1432 {
1433 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1434
1435 ASSERT(tx->tx_txg != 0);
1436 ASSERT(!refcount_is_zero(&db->db_holds));
1437
1438 DB_DNODE_ENTER(db);
1439 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1440 rf |= DB_RF_HAVESTRUCT;
1441 DB_DNODE_EXIT(db);
1442 (void) dbuf_read(db, NULL, rf);
1443 (void) dbuf_dirty(db, tx);
1444 }
1445
1446 void
1447 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1448 {
1449 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1450
1451 db->db_state = DB_NOFILL;
1452
1453 dmu_buf_will_fill(db_fake, tx);
1454 }
1455
1456 void
1457 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1458 {
1459 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1460
1461 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1462 ASSERT(tx->tx_txg != 0);
1463 ASSERT(db->db_level == 0);
1464 ASSERT(!refcount_is_zero(&db->db_holds));
1465
1466 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1467 dmu_tx_private_ok(tx));
1468
1469 dbuf_noread(db);
1470 (void) dbuf_dirty(db, tx);
1471 }
1472
1473 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1474 /* ARGSUSED */
1475 void
1476 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1477 {
1478 mutex_enter(&db->db_mtx);
1479 DBUF_VERIFY(db);
1480
1481 if (db->db_state == DB_FILL) {
1482 if (db->db_level == 0 && db->db_freed_in_flight) {
1483 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1484 /* we were freed while filling */
1485 /* XXX dbuf_undirty? */
1486 bzero(db->db.db_data, db->db.db_size);
1487 db->db_freed_in_flight = FALSE;
1488 }
1489 db->db_state = DB_CACHED;
1490 cv_broadcast(&db->db_changed);
1491 }
1492 mutex_exit(&db->db_mtx);
1493 }
1494
1495 /*
1496 * Directly assign a provided arc buf to a given dbuf if it's not referenced
1497 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1498 */
1499 void
1500 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1501 {
1502 ASSERT(!refcount_is_zero(&db->db_holds));
1503 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1504 ASSERT(db->db_level == 0);
1505 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1506 ASSERT(buf != NULL);
1507 ASSERT(arc_buf_size(buf) == db->db.db_size);
1508 ASSERT(tx->tx_txg != 0);
1509
1510 arc_return_buf(buf, db);
1511 ASSERT(arc_released(buf));
1512
1513 mutex_enter(&db->db_mtx);
1514
1515 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1516 cv_wait(&db->db_changed, &db->db_mtx);
1517
1518 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1519
1520 if (db->db_state == DB_CACHED &&
1521 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1522 mutex_exit(&db->db_mtx);
1523 (void) dbuf_dirty(db, tx);
1524 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1525 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1526 xuio_stat_wbuf_copied();
1527 return;
1528 }
1529
1530 xuio_stat_wbuf_nocopy();
1531 if (db->db_state == DB_CACHED) {
1532 dbuf_dirty_record_t *dr = db->db_last_dirty;
1533
1534 ASSERT(db->db_buf != NULL);
1535 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1536 ASSERT(dr->dt.dl.dr_data == db->db_buf);
1537 if (!arc_released(db->db_buf)) {
1538 ASSERT(dr->dt.dl.dr_override_state ==
1539 DR_OVERRIDDEN);
1540 arc_release(db->db_buf, db);
1541 }
1542 dr->dt.dl.dr_data = buf;
1543 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1544 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1545 arc_release(db->db_buf, db);
1546 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1547 }
1548 db->db_buf = NULL;
1549 }
1550 ASSERT(db->db_buf == NULL);
1551 dbuf_set_data(db, buf);
1552 db->db_state = DB_FILL;
1553 mutex_exit(&db->db_mtx);
1554 (void) dbuf_dirty(db, tx);
1555 dbuf_fill_done(db, tx);
1556 }
1557
1558 /*
1559 * "Clear" the contents of this dbuf. This will mark the dbuf
1560 * EVICTING and clear *most* of its references. Unfortunately,
1561 * when we are not holding the dn_dbufs_mtx, we can't clear the
1562 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1563 * in this case. For callers from the DMU we will usually see:
1564 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1565 * For the arc callback, we will usually see:
1566 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1567 * Sometimes, though, we will get a mix of these two:
1568 * DMU: dbuf_clear()->arc_buf_evict()
1569 * ARC: dbuf_do_evict()->dbuf_destroy()
1570 */
1571 void
1572 dbuf_clear(dmu_buf_impl_t *db)
1573 {
1574 dnode_t *dn;
1575 dmu_buf_impl_t *parent = db->db_parent;
1576 dmu_buf_impl_t *dndb;
1577 int dbuf_gone = FALSE;
1578
1579 ASSERT(MUTEX_HELD(&db->db_mtx));
1580 ASSERT(refcount_is_zero(&db->db_holds));
1581
1582 dbuf_evict_user(db);
1583
1584 if (db->db_state == DB_CACHED) {
1585 ASSERT(db->db.db_data != NULL);
1586 if (db->db_blkid == DMU_BONUS_BLKID) {
1587 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1588 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1589 }
1590 db->db.db_data = NULL;
1591 db->db_state = DB_UNCACHED;
1592 }
1593
1594 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1595 ASSERT(db->db_data_pending == NULL);
1596
1597 db->db_state = DB_EVICTING;
1598 db->db_blkptr = NULL;
1599
1600 DB_DNODE_ENTER(db);
1601 dn = DB_DNODE(db);
1602 dndb = dn->dn_dbuf;
1603 if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1604 list_remove(&dn->dn_dbufs, db);
1605 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1606 membar_producer();
1607 DB_DNODE_EXIT(db);
1608 /*
1609 * Decrementing the dbuf count means that the hold corresponding
1610 * to the removed dbuf is no longer discounted in dnode_move(),
1611 * so the dnode cannot be moved until after we release the hold.
1612 * The membar_producer() ensures visibility of the decremented
1613 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1614 * release any lock.
1615 */
1616 dnode_rele(dn, db);
1617 db->db_dnode_handle = NULL;
1618 } else {
1619 DB_DNODE_EXIT(db);
1620 }
1621
1622 if (db->db_buf)
1623 dbuf_gone = arc_buf_evict(db->db_buf);
1624
1625 if (!dbuf_gone)
1626 mutex_exit(&db->db_mtx);
1627
1628 /*
1629 * If this dbuf is referenced from an indirect dbuf,
1630 * decrement the ref count on the indirect dbuf.
1631 */
1632 if (parent && parent != dndb)
1633 dbuf_rele(parent, db);
1634 }
1635
1636 __attribute__((always_inline))
1637 static inline int
1638 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1639 dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
1640 {
1641 int nlevels, epbs;
1642
1643 *parentp = NULL;
1644 *bpp = NULL;
1645
1646 ASSERT(blkid != DMU_BONUS_BLKID);
1647
1648 if (blkid == DMU_SPILL_BLKID) {
1649 mutex_enter(&dn->dn_mtx);
1650 if (dn->dn_have_spill &&
1651 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1652 *bpp = &dn->dn_phys->dn_spill;
1653 else
1654 *bpp = NULL;
1655 dbuf_add_ref(dn->dn_dbuf, NULL);
1656 *parentp = dn->dn_dbuf;
1657 mutex_exit(&dn->dn_mtx);
1658 return (0);
1659 }
1660
1661 if (dn->dn_phys->dn_nlevels == 0)
1662 nlevels = 1;
1663 else
1664 nlevels = dn->dn_phys->dn_nlevels;
1665
1666 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
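	/*
	 * epbs is log2(block pointers per indirect block); e.g. a 128K
	 * indirect block holds 1024 128-byte blkptrs, so epbs == 10.
	 */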
1667
1668 ASSERT3U(level * epbs, <, 64);
1669 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1670 if (level >= nlevels ||
1671 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1672 /* the buffer has no parent yet */
1673 return (ENOENT);
1674 } else if (level < nlevels-1) {
1675 /* this block is referenced from an indirect block */
1676 int err;
1677 if (dh == NULL) {
1678 err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
1679 fail_sparse, NULL, parentp);
1680 }
1681 else {
1682 __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
1683 blkid >> epbs, fail_sparse, NULL,
1684 parentp, dh->dh_depth + 1);
1685 err = __dbuf_hold_impl(dh + 1);
1686 }
1687 if (err)
1688 return (err);
1689 err = dbuf_read(*parentp, NULL,
1690 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1691 if (err) {
1692 dbuf_rele(*parentp, NULL);
1693 *parentp = NULL;
1694 return (err);
1695 }
1696 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1697 (blkid & ((1ULL << epbs) - 1));
1698 return (0);
1699 } else {
1700 /* the block is referenced from the dnode */
1701 ASSERT3U(level, ==, nlevels-1);
1702 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1703 blkid < dn->dn_phys->dn_nblkptr);
1704 if (dn->dn_dbuf) {
1705 dbuf_add_ref(dn->dn_dbuf, NULL);
1706 *parentp = dn->dn_dbuf;
1707 }
1708 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1709 return (0);
1710 }
1711 }
1712
1713 static dmu_buf_impl_t *
1714 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1715 dmu_buf_impl_t *parent, blkptr_t *blkptr)
1716 {
1717 objset_t *os = dn->dn_objset;
1718 dmu_buf_impl_t *db, *odb;
1719
1720 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1721 ASSERT(dn->dn_type != DMU_OT_NONE);
1722
1723 db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);
1724
1725 db->db_objset = os;
1726 db->db.db_object = dn->dn_object;
1727 db->db_level = level;
1728 db->db_blkid = blkid;
1729 db->db_last_dirty = NULL;
1730 db->db_dirtycnt = 0;
1731 db->db_dnode_handle = dn->dn_handle;
1732 db->db_parent = parent;
1733 db->db_blkptr = blkptr;
1734
1735 db->db_user_ptr = NULL;
1736 db->db_user_data_ptr_ptr = NULL;
1737 db->db_evict_func = NULL;
1738 db->db_immediate_evict = 0;
1739 db->db_freed_in_flight = 0;
1740
1741 if (blkid == DMU_BONUS_BLKID) {
1742 ASSERT3P(parent, ==, dn->dn_dbuf);
1743 db->db.db_size = DN_MAX_BONUSLEN -
1744 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1745 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1746 db->db.db_offset = DMU_BONUS_BLKID;
1747 db->db_state = DB_UNCACHED;
1748 /* the bonus dbuf is not placed in the hash table */
1749 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1750 return (db);
1751 } else if (blkid == DMU_SPILL_BLKID) {
1752 db->db.db_size = (blkptr != NULL) ?
1753 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1754 db->db.db_offset = 0;
1755 } else {
1756 int blocksize =
1757 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1758 db->db.db_size = blocksize;
1759 db->db.db_offset = db->db_blkid * blocksize;
1760 }
1761
1762 /*
1763 * Hold the dn_dbufs_mtx while we get the new dbuf
1764 * in the hash table *and* added to the dbufs list.
1765 * This prevents a possible deadlock with someone
1766 * trying to look up this dbuf before it's added to the
1767 * dn_dbufs list.
1768 */
1769 mutex_enter(&dn->dn_dbufs_mtx);
1770 db->db_state = DB_EVICTING;
1771 if ((odb = dbuf_hash_insert(db)) != NULL) {
1772 /* someone else inserted it first */
1773 kmem_cache_free(dbuf_cache, db);
1774 mutex_exit(&dn->dn_dbufs_mtx);
1775 return (odb);
1776 }
1777 list_insert_head(&dn->dn_dbufs, db);
1778 db->db_state = DB_UNCACHED;
1779 mutex_exit(&dn->dn_dbufs_mtx);
1780 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1781
1782 if (parent && parent != dn->dn_dbuf)
1783 dbuf_add_ref(parent, db);
1784
1785 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1786 refcount_count(&dn->dn_holds) > 0);
1787 (void) refcount_add(&dn->dn_holds, db);
1788 (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1789
1790 dprintf_dbuf(db, "db=%p\n", db);
1791
1792 return (db);
1793 }
1794
1795 static int
1796 dbuf_do_evict(void *private)
1797 {
1798 arc_buf_t *buf = private;
1799 dmu_buf_impl_t *db = buf->b_private;
1800
1801 if (!MUTEX_HELD(&db->db_mtx))
1802 mutex_enter(&db->db_mtx);
1803
1804 ASSERT(refcount_is_zero(&db->db_holds));
1805
1806 if (db->db_state != DB_EVICTING) {
1807 ASSERT(db->db_state == DB_CACHED);
1808 DBUF_VERIFY(db);
1809 db->db_buf = NULL;
1810 dbuf_evict(db);
1811 } else {
1812 mutex_exit(&db->db_mtx);
1813 dbuf_destroy(db);
1814 }
1815 return (0);
1816 }
1817
1818 static void
1819 dbuf_destroy(dmu_buf_impl_t *db)
1820 {
1821 ASSERT(refcount_is_zero(&db->db_holds));
1822
1823 if (db->db_blkid != DMU_BONUS_BLKID) {
1824 /*
1825 * If this dbuf is still on the dn_dbufs list,
1826 * remove it from that list.
1827 */
1828 if (db->db_dnode_handle != NULL) {
1829 dnode_t *dn;
1830
1831 DB_DNODE_ENTER(db);
1832 dn = DB_DNODE(db);
1833 mutex_enter(&dn->dn_dbufs_mtx);
1834 list_remove(&dn->dn_dbufs, db);
1835 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1836 mutex_exit(&dn->dn_dbufs_mtx);
1837 DB_DNODE_EXIT(db);
1838 /*
1839 * Decrementing the dbuf count means that the hold
1840 * corresponding to the removed dbuf is no longer
1841 * discounted in dnode_move(), so the dnode cannot be
1842 * moved until after we release the hold.
1843 */
1844 dnode_rele(dn, db);
1845 db->db_dnode_handle = NULL;
1846 }
1847 dbuf_hash_remove(db);
1848 }
1849 db->db_parent = NULL;
1850 db->db_buf = NULL;
1851
1852 ASSERT(!list_link_active(&db->db_link));
1853 ASSERT(db->db.db_data == NULL);
1854 ASSERT(db->db_hash_next == NULL);
1855 ASSERT(db->db_blkptr == NULL);
1856 ASSERT(db->db_data_pending == NULL);
1857
1858 kmem_cache_free(dbuf_cache, db);
1859 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1860 }
1861
1862 void
1863 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1864 {
1865 dmu_buf_impl_t *db = NULL;
1866 blkptr_t *bp = NULL;
1867
1868 ASSERT(blkid != DMU_BONUS_BLKID);
1869 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1870
1871 if (dnode_block_freed(dn, blkid))
1872 return;
1873
1874 /* dbuf_find() returns with db_mtx held */
1875 if ((db = dbuf_find(dn, 0, blkid))) {
1876 /*
1877 * This dbuf is already in the cache. We assume that
1878 * it is already CACHED, or else about to be either
1879 * read or filled.
1880 */
1881 mutex_exit(&db->db_mtx);
1882 return;
1883 }
1884
1885 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
1886 if (bp && !BP_IS_HOLE(bp)) {
1887 int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1888 ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1889 arc_buf_t *pbuf;
1890 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1891 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1892 zbookmark_t zb;
1893
1894 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1895 dn->dn_object, 0, blkid);
1896
1897 if (db)
1898 pbuf = db->db_buf;
1899 else
1900 pbuf = dn->dn_objset->os_phys_buf;
1901
1902 (void) dsl_read(NULL, dn->dn_objset->os_spa,
1903 bp, pbuf, NULL, NULL, priority,
1904 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1905 &aflags, &zb);
1906 }
1907 if (db)
1908 dbuf_rele(db, NULL);
1909 }
1910 }
1911
1912 #define DBUF_HOLD_IMPL_MAX_DEPTH 20
1913
1914 /*
1915 * Returns with db_holds incremented, and db_mtx not held.
1916 * Note: dn_struct_rwlock must be held.
1917 */
1918 static int
1919 __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
1920 {
1921 ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
1922 dh->dh_parent = NULL;
1923
1924 ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
1925 ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
1926 ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
1927
1928 *(dh->dh_dbp) = NULL;
1929 top:
1930 /* dbuf_find() returns with db_mtx held */
1931 dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);
1932
1933 if (dh->dh_db == NULL) {
1934 dh->dh_bp = NULL;
1935
1936 ASSERT3P(dh->dh_parent, ==, NULL);
1937 dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1938 dh->dh_fail_sparse, &dh->dh_parent,
1939 &dh->dh_bp, dh);
1940 if (dh->dh_fail_sparse) {
1941 if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
1942 dh->dh_err = ENOENT;
1943 if (dh->dh_err) {
1944 if (dh->dh_parent)
1945 dbuf_rele(dh->dh_parent, NULL);
1946 return (dh->dh_err);
1947 }
1948 }
1949 if (dh->dh_err && dh->dh_err != ENOENT)
1950 return (dh->dh_err);
1951 dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1952 dh->dh_parent, dh->dh_bp);
1953 }
1954
1955 if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
1956 arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
1957 if (dh->dh_db->db_buf->b_data == NULL) {
1958 dbuf_clear(dh->dh_db);
1959 if (dh->dh_parent) {
1960 dbuf_rele(dh->dh_parent, NULL);
1961 dh->dh_parent = NULL;
1962 }
1963 goto top;
1964 }
1965 ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
1966 }
1967
1968 ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
1969
1970 /*
1971 	 * If this buffer is currently syncing out, and we are
1972 * still referencing it from db_data, we need to make a copy
1973 * of it in case we decide we want to dirty it again in this txg.
1974 */
1975 if (dh->dh_db->db_level == 0 &&
1976 dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
1977 dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
1978 dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
1979 dh->dh_dr = dh->dh_db->db_data_pending;
1980
1981 if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
1982 dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);
1983
1984 dbuf_set_data(dh->dh_db,
1985 arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
1986 dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
1987 bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
1988 dh->dh_db->db.db_data, dh->dh_db->db.db_size);
1989 }
1990 }
1991
1992 (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
1993 dbuf_update_data(dh->dh_db);
1994 DBUF_VERIFY(dh->dh_db);
1995 mutex_exit(&dh->dh_db->db_mtx);
1996
1997 /* NOTE: we can't rele the parent until after we drop the db_mtx */
1998 if (dh->dh_parent)
1999 dbuf_rele(dh->dh_parent, NULL);
2000
2001 ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
2002 ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
2003 ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
2004 *(dh->dh_dbp) = dh->dh_db;
2005
2006 return (0);
2007 }
2008
2009 /*
2010 * The following code preserves the recursive function dbuf_hold_impl()
2011 * but moves the local variables AND function arguments to the heap to
2012  * minimize the stack frame size. Enough space is initially allocated
2013  * on the heap for 20 levels of recursion.
2014 */
2015 int
2016 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
2017 void *tag, dmu_buf_impl_t **dbp)
2018 {
2019 struct dbuf_hold_impl_data *dh;
2020 int error;
2021
2022 	dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
2023 DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
2024 __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);
2025
2026 error = __dbuf_hold_impl(dh);
2027
2028 	kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
2029 DBUF_HOLD_IMPL_MAX_DEPTH);
2030
2031 return (error);
2032 }
2033
2034 static void
2035 __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
2036 dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
2037 void *tag, dmu_buf_impl_t **dbp, int depth)
2038 {
2039 dh->dh_dn = dn;
2040 dh->dh_level = level;
2041 dh->dh_blkid = blkid;
2042 dh->dh_fail_sparse = fail_sparse;
2043 dh->dh_tag = tag;
2044 dh->dh_dbp = dbp;
2045 dh->dh_depth = depth;
2046 }
2047
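/*
 * Convenience wrappers around dbuf_hold_impl() for level-0 blocks
 * (dbuf_hold) and arbitrary levels (dbuf_hold_level); both return NULL
 * on error rather than an error code.
 */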
2048 dmu_buf_impl_t *
2049 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2050 {
2051 dmu_buf_impl_t *db;
2052 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
2053 return (err ? NULL : db);
2054 }
2055
2056 dmu_buf_impl_t *
2057 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2058 {
2059 dmu_buf_impl_t *db;
2060 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
2061 return (err ? NULL : db);
2062 }
2063
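/*
 * Create the bonus buffer dbuf for a dnode.  The caller must hold
 * dn_struct_rwlock as writer, and the dnode must not already have a
 * bonus dbuf.
 */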
2064 void
2065 dbuf_create_bonus(dnode_t *dn)
2066 {
2067 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2068
2069 ASSERT(dn->dn_bonus == NULL);
2070 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2071 }
2072
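/*
 * Set the block size of a spill dbuf.  A size of zero maps to
 * SPA_MINBLOCKSIZE; other sizes are rounded up to a multiple of
 * SPA_MINBLOCKSIZE and capped at SPA_MAXBLOCKSIZE.  Returns ENOTSUP
 * if the dbuf is not a spill block.
 */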
2073 int
2074 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2075 {
2076 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2077 dnode_t *dn;
2078
2079 if (db->db_blkid != DMU_SPILL_BLKID)
2080 return (ENOTSUP);
2081 if (blksz == 0)
2082 blksz = SPA_MINBLOCKSIZE;
2083 if (blksz > SPA_MAXBLOCKSIZE)
2084 blksz = SPA_MAXBLOCKSIZE;
2085 else
2086 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2087
2088 DB_DNODE_ENTER(db);
2089 dn = DB_DNODE(db);
2090 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2091 dbuf_new_size(db, blksz, tx);
2092 rw_exit(&dn->dn_struct_rwlock);
2093 DB_DNODE_EXIT(db);
2094
2095 return (0);
2096 }
2097
2098 void
2099 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2100 {
2101 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2102 }
2103
2104 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2105 void
2106 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2107 {
2108 VERIFY(refcount_add(&db->db_holds, tag) > 1);
2109 }
2110
2111 /*
2112 * If you call dbuf_rele() you had better not be referencing the dnode handle
2113 * unless you have some other direct or indirect hold on the dnode. (An indirect
2114 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2115 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2116 * dnode's parent dbuf evicting its dnode handles.
2117 */
2118 #pragma weak dmu_buf_rele = dbuf_rele
2119 void
2120 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2121 {
2122 mutex_enter(&db->db_mtx);
2123 dbuf_rele_and_unlock(db, tag);
2124 }
2125
2126 /*
2127 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
2128 * db_dirtycnt and db_holds to be updated atomically.
2129 */
2130 void
2131 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2132 {
2133 int64_t holds;
2134
2135 ASSERT(MUTEX_HELD(&db->db_mtx));
2136 DBUF_VERIFY(db);
2137
2138 /*
2139 * Remove the reference to the dbuf before removing its hold on the
2140 * dnode so we can guarantee in dnode_move() that a referenced bonus
2141 * buffer has a corresponding dnode hold.
2142 */
2143 holds = refcount_remove(&db->db_holds, tag);
2144 ASSERT(holds >= 0);
2145
2146 /*
2147 * We can't freeze indirects if there is a possibility that they
2148 * may be modified in the current syncing context.
2149 */
2150 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2151 arc_buf_freeze(db->db_buf);
2152
2153 if (holds == db->db_dirtycnt &&
2154 db->db_level == 0 && db->db_immediate_evict)
2155 dbuf_evict_user(db);
2156
2157 if (holds == 0) {
2158 if (db->db_blkid == DMU_BONUS_BLKID) {
2159 mutex_exit(&db->db_mtx);
2160
2161 /*
2162 * If the dnode moves here, we cannot cross this barrier
2163 * until the move completes.
2164 */
2165 DB_DNODE_ENTER(db);
2166 (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2167 DB_DNODE_EXIT(db);
2168 /*
2169 * The bonus buffer's dnode hold is no longer discounted
2170 * in dnode_move(). The dnode cannot move until after
2171 * the dnode_rele().
2172 */
2173 dnode_rele(DB_DNODE(db), db);
2174 } else if (db->db_buf == NULL) {
2175 /*
2176 * This is a special case: we never associated this
2177 * dbuf with any data allocated from the ARC.
2178 */
2179 ASSERT(db->db_state == DB_UNCACHED ||
2180 db->db_state == DB_NOFILL);
2181 dbuf_evict(db);
2182 } else if (arc_released(db->db_buf)) {
2183 arc_buf_t *buf = db->db_buf;
2184 /*
2185 * This dbuf has anonymous data associated with it.
2186 */
2187 dbuf_set_data(db, NULL);
2188 VERIFY(arc_buf_remove_ref(buf, db) == 1);
2189 dbuf_evict(db);
2190 } else {
2191 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
2192
2193 /*
2194 * A dbuf will be eligible for eviction if either the
2195 * 'primarycache' property is set or a duplicate
2196 * copy of this buffer is already cached in the arc.
2197 *
2198 			 * In the case of the 'primarycache' property, a
2199 			 * buffer is considered for eviction if it matches
2200 			 * the criteria set in that property.
2201 *
2202 * To decide if our buffer is considered a
2203 * duplicate, we must call into the arc to determine
2204 * if multiple buffers are referencing the same
2205 * block on-disk. If so, then we simply evict
2206 * ourselves.
2207 */
2208 if (!DBUF_IS_CACHEABLE(db) ||
2209 arc_buf_eviction_needed(db->db_buf))
2210 dbuf_clear(db);
2211 else
2212 mutex_exit(&db->db_mtx);
2213 }
2214 } else {
2215 mutex_exit(&db->db_mtx);
2216 }
2217 }
2218
2219 #pragma weak dmu_buf_refcount = dbuf_refcount
2220 uint64_t
2221 dbuf_refcount(dmu_buf_impl_t *db)
2222 {
2223 return (refcount_count(&db->db_holds));
2224 }
2225
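/*
 * Attach caller-private state and an eviction callback to a dbuf.
 * dmu_buf_update_user() installs the new user pointer only if the current
 * one matches old_user_ptr (compare-and-swap semantics), and returns the
 * pointer that was in place.  The *_ie variant additionally marks the dbuf
 * for immediate eviction of the user data when the last hold is dropped.
 */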
2226 void *
2227 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2228 dmu_buf_evict_func_t *evict_func)
2229 {
2230 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2231 user_data_ptr_ptr, evict_func));
2232 }
2233
2234 void *
2235 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2236 dmu_buf_evict_func_t *evict_func)
2237 {
2238 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2239
2240 db->db_immediate_evict = TRUE;
2241 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2242 user_data_ptr_ptr, evict_func));
2243 }
2244
2245 void *
2246 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2247 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2248 {
2249 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2250 ASSERT(db->db_level == 0);
2251
2252 ASSERT((user_ptr == NULL) == (evict_func == NULL));
2253
2254 mutex_enter(&db->db_mtx);
2255
2256 if (db->db_user_ptr == old_user_ptr) {
2257 db->db_user_ptr = user_ptr;
2258 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2259 db->db_evict_func = evict_func;
2260
2261 dbuf_update_data(db);
2262 } else {
2263 old_user_ptr = db->db_user_ptr;
2264 }
2265
2266 mutex_exit(&db->db_mtx);
2267 return (old_user_ptr);
2268 }
2269
2270 void *
2271 dmu_buf_get_user(dmu_buf_t *db_fake)
2272 {
2273 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2274 ASSERT(!refcount_is_zero(&db->db_holds));
2275
2276 return (db->db_user_ptr);
2277 }
2278
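/*
 * Returns B_TRUE if freeing or rewriting the block referenced by this dbuf
 * would actually release space in the dataset (see
 * dsl_dataset_block_freeable()).  A dbuf with no block pointer is never
 * considered freeable.
 */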
2279 boolean_t
2280 dmu_buf_freeable(dmu_buf_t *dbuf)
2281 {
2282 boolean_t res = B_FALSE;
2283 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2284
2285 if (db->db_blkptr)
2286 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2287 db->db_blkptr, db->db_blkptr->blk_birth);
2288
2289 return (res);
2290 }
2291
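/*
 * Make sure db_blkptr points at the on-disk block pointer that describes
 * this dbuf: the dnode's spill pointer, one of the block pointers embedded
 * in the dnode, or a slot in the parent indirect block (taking a hold on
 * the parent dbuf if it is not already attached).
 */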
2292 static void
2293 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2294 {
2295 	/* ASSERT(dmu_tx_is_syncing(tx)) */
2296 ASSERT(MUTEX_HELD(&db->db_mtx));
2297
2298 if (db->db_blkptr != NULL)
2299 return;
2300
2301 if (db->db_blkid == DMU_SPILL_BLKID) {
2302 db->db_blkptr = &dn->dn_phys->dn_spill;
2303 BP_ZERO(db->db_blkptr);
2304 return;
2305 }
2306 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2307 /*
2308 		 * This buffer was allocated at a time when there were
2309 		 * no available blkptrs from the dnode, or it was
2310 		 * inappropriate to hook it in (i.e., nlevels mismatch).
2311 */
2312 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2313 ASSERT(db->db_parent == NULL);
2314 db->db_parent = dn->dn_dbuf;
2315 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2316 DBUF_VERIFY(db);
2317 } else {
2318 dmu_buf_impl_t *parent = db->db_parent;
2319 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2320
2321 ASSERT(dn->dn_phys->dn_nlevels > 1);
2322 if (parent == NULL) {
2323 mutex_exit(&db->db_mtx);
2324 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2325 (void) dbuf_hold_impl(dn, db->db_level+1,
2326 db->db_blkid >> epbs, FALSE, db, &parent);
2327 rw_exit(&dn->dn_struct_rwlock);
2328 mutex_enter(&db->db_mtx);
2329 db->db_parent = parent;
2330 }
2331 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2332 (db->db_blkid & ((1ULL << epbs) - 1));
2333 DBUF_VERIFY(db);
2334 }
2335 }
2336
2337 /* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
2338  * is critical that we not allow the compiler to inline this function into
2339  * dbuf_sync_list(), thereby drastically bloating the stack usage.
2340 */
2341 noinline static void
2342 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2343 {
2344 dmu_buf_impl_t *db = dr->dr_dbuf;
2345 dnode_t *dn;
2346 zio_t *zio;
2347
2348 ASSERT(dmu_tx_is_syncing(tx));
2349
2350 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2351
2352 mutex_enter(&db->db_mtx);
2353
2354 ASSERT(db->db_level > 0);
2355 DBUF_VERIFY(db);
2356
2357 if (db->db_buf == NULL) {
2358 mutex_exit(&db->db_mtx);
2359 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2360 mutex_enter(&db->db_mtx);
2361 }
2362 ASSERT3U(db->db_state, ==, DB_CACHED);
2363 ASSERT(db->db_buf != NULL);
2364
2365 DB_DNODE_ENTER(db);
2366 dn = DB_DNODE(db);
2367 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2368 dbuf_check_blkptr(dn, db);
2369 DB_DNODE_EXIT(db);
2370
2371 db->db_data_pending = dr;
2372
2373 mutex_exit(&db->db_mtx);
2374 dbuf_write(dr, db->db_buf, tx);
2375
2376 zio = dr->dr_zio;
2377 mutex_enter(&dr->dt.di.dr_mtx);
2378 dbuf_sync_list(&dr->dt.di.dr_children, tx);
2379 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2380 mutex_exit(&dr->dt.di.dr_mtx);
2381 zio_nowait(zio);
2382 }
2383
2384 /* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
2385  * critical that we not allow the compiler to inline this function into
2386  * dbuf_sync_list(), thereby drastically bloating the stack usage.
2387 */
2388 noinline static void
2389 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2390 {
2391 arc_buf_t **datap = &dr->dt.dl.dr_data;
2392 dmu_buf_impl_t *db = dr->dr_dbuf;
2393 dnode_t *dn;
2394 objset_t *os;
2395 uint64_t txg = tx->tx_txg;
2396
2397 ASSERT(dmu_tx_is_syncing(tx));
2398
2399 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2400
2401 mutex_enter(&db->db_mtx);
2402 /*
2403 * To be synced, we must be dirtied. But we
2404 * might have been freed after the dirty.
2405 */
2406 if (db->db_state == DB_UNCACHED) {
2407 /* This buffer has been freed since it was dirtied */
2408 ASSERT(db->db.db_data == NULL);
2409 } else if (db->db_state == DB_FILL) {
2410 /* This buffer was freed and is now being re-filled */
2411 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2412 } else {
2413 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2414 }
2415 DBUF_VERIFY(db);
2416
2417 DB_DNODE_ENTER(db);
2418 dn = DB_DNODE(db);
2419
2420 if (db->db_blkid == DMU_SPILL_BLKID) {
2421 mutex_enter(&dn->dn_mtx);
2422 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2423 mutex_exit(&dn->dn_mtx);
2424 }
2425
2426 /*
2427 * If this is a bonus buffer, simply copy the bonus data into the
2428 * dnode. It will be written out when the dnode is synced (and it
2429 * will be synced, since it must have been dirty for dbuf_sync to
2430 * be called).
2431 */
2432 if (db->db_blkid == DMU_BONUS_BLKID) {
2433 dbuf_dirty_record_t **drp;
2434
2435 ASSERT(*datap != NULL);
2436 ASSERT0(db->db_level);
2437 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2438 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2439 DB_DNODE_EXIT(db);
2440
2441 if (*datap != db->db.db_data) {
2442 zio_buf_free(*datap, DN_MAX_BONUSLEN);
2443 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2444 }
2445 db->db_data_pending = NULL;
2446 drp = &db->db_last_dirty;
2447 while (*drp != dr)
2448 drp = &(*drp)->dr_next;
2449 ASSERT(dr->dr_next == NULL);
2450 ASSERT(dr->dr_dbuf == db);
2451 *drp = dr->dr_next;
2452 if (dr->dr_dbuf->db_level != 0) {
2453 mutex_destroy(&dr->dt.di.dr_mtx);
2454 list_destroy(&dr->dt.di.dr_children);
2455 }
2456 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2457 ASSERT(db->db_dirtycnt > 0);
2458 db->db_dirtycnt -= 1;
2459 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2460 return;
2461 }
2462
2463 os = dn->dn_objset;
2464
2465 /*
2466 * This function may have dropped the db_mtx lock allowing a dmu_sync
2467 * operation to sneak in. As a result, we need to ensure that we
2468 * don't check the dr_override_state until we have returned from
2469 * dbuf_check_blkptr.
2470 */
2471 dbuf_check_blkptr(dn, db);
2472
2473 /*
2474 * If this buffer is in the middle of an immediate write,
2475 * wait for the synchronous IO to complete.
2476 */
2477 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2478 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2479 cv_wait(&db->db_changed, &db->db_mtx);
2480 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2481 }
2482
2483 if (db->db_state != DB_NOFILL &&
2484 dn->dn_object != DMU_META_DNODE_OBJECT &&
2485 refcount_count(&db->db_holds) > 1 &&
2486 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2487 *datap == db->db_buf) {
2488 /*
2489 * If this buffer is currently "in use" (i.e., there
2490 * are active holds and db_data still references it),
2491 * then make a copy before we start the write so that
2492 * any modifications from the open txg will not leak
2493 * into this write.
2494 *
2495 * NOTE: this copy does not need to be made for
2496 * objects only modified in the syncing context (e.g.
2497 		 * DMU_OT_DNODE blocks).
2498 */
2499 int blksz = arc_buf_size(*datap);
2500 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2501 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2502 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2503 }
2504 db->db_data_pending = dr;
2505
2506 mutex_exit(&db->db_mtx);
2507
2508 dbuf_write(dr, *datap, tx);
2509
2510 ASSERT(!list_link_active(&dr->dr_dirty_node));
2511 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2512 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2513 DB_DNODE_EXIT(db);
2514 } else {
2515 /*
2516 * Although zio_nowait() does not "wait for an IO", it does
2517 * initiate the IO. If this is an empty write it seems plausible
2518 * that the IO could actually be completed before the nowait
2519 * returns. We need to DB_DNODE_EXIT() first in case
2520 * zio_nowait() invalidates the dbuf.
2521 */
2522 DB_DNODE_EXIT(db);
2523 zio_nowait(dr->dr_zio);
2524 }
2525 }
2526
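/*
 * Sync every dirty record on the given list, recursing into indirect
 * blocks via dbuf_sync_indirect() and writing out leaf blocks via
 * dbuf_sync_leaf().
 */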
2527 void
2528 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2529 {
2530 dbuf_dirty_record_t *dr;
2531
2532 while ((dr = list_head(list))) {
2533 if (dr->dr_zio != NULL) {
2534 /*
2535 * If we find an already initialized zio then we
2536 * are processing the meta-dnode, and we have finished.
2537 * The dbufs for all dnodes are put back on the list
2538 * during processing, so that we can zio_wait()
2539 * these IOs after initiating all child IOs.
2540 */
2541 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2542 DMU_META_DNODE_OBJECT);
2543 break;
2544 }
2545 list_remove(list, dr);
2546 if (dr->dr_dbuf->db_level > 0)
2547 dbuf_sync_indirect(dr, tx);
2548 else
2549 dbuf_sync_leaf(dr, tx);
2550 }
2551 }
2552
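/*
 * ZIO "ready" callback for a dbuf write, called once the block pointer has
 * been filled in: update the dnode's space accounting and (for level-0
 * blocks) maxblkid, then compute the fill count recorded in the new block
 * pointer.
 */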
2553 /* ARGSUSED */
2554 static void
2555 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2556 {
2557 dmu_buf_impl_t *db = vdb;
2558 dnode_t *dn;
2559 blkptr_t *bp = zio->io_bp;
2560 blkptr_t *bp_orig = &zio->io_bp_orig;
2561 spa_t *spa = zio->io_spa;
2562 int64_t delta;
2563 uint64_t fill = 0;
2564 int i;
2565
2566 ASSERT(db->db_blkptr == bp);
2567
2568 DB_DNODE_ENTER(db);
2569 dn = DB_DNODE(db);
2570 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2571 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2572 zio->io_prev_space_delta = delta;
2573
2574 if (BP_IS_HOLE(bp)) {
2575 ASSERT(bp->blk_fill == 0);
2576 DB_DNODE_EXIT(db);
2577 return;
2578 }
2579
2580 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2581 BP_GET_TYPE(bp) == dn->dn_type) ||
2582 (db->db_blkid == DMU_SPILL_BLKID &&
2583 BP_GET_TYPE(bp) == dn->dn_bonustype));
2584 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2585
2586 mutex_enter(&db->db_mtx);
2587
2588 #ifdef ZFS_DEBUG
2589 if (db->db_blkid == DMU_SPILL_BLKID) {
2590 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2591 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2592 db->db_blkptr == &dn->dn_phys->dn_spill);
2593 }
2594 #endif
2595
2596 if (db->db_level == 0) {
2597 mutex_enter(&dn->dn_mtx);
2598 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2599 db->db_blkid != DMU_SPILL_BLKID)
2600 dn->dn_phys->dn_maxblkid = db->db_blkid;
2601 mutex_exit(&dn->dn_mtx);
2602
2603 if (dn->dn_type == DMU_OT_DNODE) {
2604 dnode_phys_t *dnp = db->db.db_data;
2605 for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2606 i--, dnp++) {
2607 if (dnp->dn_type != DMU_OT_NONE)
2608 fill++;
2609 }
2610 } else {
2611 fill = 1;
2612 }
2613 } else {
2614 blkptr_t *ibp = db->db.db_data;
2615 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2616 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2617 if (BP_IS_HOLE(ibp))
2618 continue;
2619 fill += ibp->blk_fill;
2620 }
2621 }
2622 DB_DNODE_EXIT(db);
2623
2624 bp->blk_fill = fill;
2625
2626 mutex_exit(&db->db_mtx);
2627 }
2628
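/*
 * ZIO "done" callback for a dbuf write: account the freed and newly born
 * blocks against the dataset (unless this was an in-place rewrite), detach
 * and free the dirty record, and drop the hold that was taken when the
 * dbuf was dirtied.
 */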
2629 /* ARGSUSED */
2630 static void
2631 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2632 {
2633 dmu_buf_impl_t *db = vdb;
2634 blkptr_t *bp = zio->io_bp;
2635 blkptr_t *bp_orig = &zio->io_bp_orig;
2636 uint64_t txg = zio->io_txg;
2637 dbuf_dirty_record_t **drp, *dr;
2638
2639 ASSERT0(zio->io_error);
2640 ASSERT(db->db_blkptr == bp);
2641
2642 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
2643 ASSERT(BP_EQUAL(bp, bp_orig));
2644 } else {
2645 objset_t *os;
2646 dsl_dataset_t *ds;
2647 dmu_tx_t *tx;
2648
2649 DB_GET_OBJSET(&os, db);
2650 ds = os->os_dsl_dataset;
2651 tx = os->os_synctx;
2652
2653 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2654 dsl_dataset_block_born(ds, bp, tx);
2655 }
2656
2657 mutex_enter(&db->db_mtx);
2658
2659 DBUF_VERIFY(db);
2660
2661 drp = &db->db_last_dirty;
2662 while ((dr = *drp) != db->db_data_pending)
2663 drp = &dr->dr_next;
2664 ASSERT(!list_link_active(&dr->dr_dirty_node));
2665 ASSERT(dr->dr_txg == txg);
2666 ASSERT(dr->dr_dbuf == db);
2667 ASSERT(dr->dr_next == NULL);
2668 *drp = dr->dr_next;
2669
2670 #ifdef ZFS_DEBUG
2671 if (db->db_blkid == DMU_SPILL_BLKID) {
2672 dnode_t *dn;
2673
2674 DB_DNODE_ENTER(db);
2675 dn = DB_DNODE(db);
2676 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2677 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2678 db->db_blkptr == &dn->dn_phys->dn_spill);
2679 DB_DNODE_EXIT(db);
2680 }
2681 #endif
2682
2683 if (db->db_level == 0) {
2684 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2685 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2686 if (db->db_state != DB_NOFILL) {
2687 if (dr->dt.dl.dr_data != db->db_buf)
2688 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2689 db) == 1);
2690 else if (!arc_released(db->db_buf))
2691 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2692 }
2693 } else {
2694 dnode_t *dn;
2695
2696 DB_DNODE_ENTER(db);
2697 dn = DB_DNODE(db);
2698 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2699 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2700 if (!BP_IS_HOLE(db->db_blkptr)) {
2701 ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
2702 SPA_BLKPTRSHIFT);
2703 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2704 db->db.db_size);
2705 ASSERT3U(dn->dn_phys->dn_maxblkid
2706 >> (db->db_level * epbs), >=, db->db_blkid);
2707 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2708 }
2709 DB_DNODE_EXIT(db);
2710 mutex_destroy(&dr->dt.di.dr_mtx);
2711 list_destroy(&dr->dt.di.dr_children);
2712 }
2713 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2714
2715 cv_broadcast(&db->db_changed);
2716 ASSERT(db->db_dirtycnt > 0);
2717 db->db_dirtycnt -= 1;
2718 db->db_data_pending = NULL;
2719 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2720 }
2721
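/*
 * Thin ZIO callback shims for writes issued without an ARC buffer (NOFILL
 * writes and dmu_sync() overrides); they funnel the ready/done events back
 * through dbuf_write_ready() and dbuf_write_done().  The override "done"
 * callback additionally frees the overridden block when the final block
 * pointer ends up differing from it.
 */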
2722 static void
2723 dbuf_write_nofill_ready(zio_t *zio)
2724 {
2725 dbuf_write_ready(zio, NULL, zio->io_private);
2726 }
2727
2728 static void
2729 dbuf_write_nofill_done(zio_t *zio)
2730 {
2731 dbuf_write_done(zio, NULL, zio->io_private);
2732 }
2733
2734 static void
2735 dbuf_write_override_ready(zio_t *zio)
2736 {
2737 dbuf_dirty_record_t *dr = zio->io_private;
2738 dmu_buf_impl_t *db = dr->dr_dbuf;
2739
2740 dbuf_write_ready(zio, NULL, db);
2741 }
2742
2743 static void
2744 dbuf_write_override_done(zio_t *zio)
2745 {
2746 dbuf_dirty_record_t *dr = zio->io_private;
2747 dmu_buf_impl_t *db = dr->dr_dbuf;
2748 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2749
2750 mutex_enter(&db->db_mtx);
2751 if (!BP_EQUAL(zio->io_bp, obp)) {
2752 if (!BP_IS_HOLE(obp))
2753 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2754 arc_release(dr->dt.dl.dr_data, db);
2755 }
2756 mutex_exit(&db->db_mtx);
2757
2758 dbuf_write_done(zio, NULL, db);
2759 }
2760
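/*
 * Construct and hand off the ZIO that writes out a dirty dbuf, chained
 * under the parent indirect's zio (or the dnode's zio): either a
 * zio_write() carrying a dmu_sync() override, a NODATA write for NOFILL
 * dbufs, or an arc_write() of the dbuf's ARC buffer.
 */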
2761 static void
2762 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2763 {
2764 dmu_buf_impl_t *db = dr->dr_dbuf;
2765 dnode_t *dn;
2766 objset_t *os;
2767 dmu_buf_impl_t *parent = db->db_parent;
2768 uint64_t txg = tx->tx_txg;
2769 zbookmark_t zb;
2770 zio_prop_t zp;
2771 zio_t *zio;
2772 int wp_flag = 0;
2773
2774 DB_DNODE_ENTER(db);
2775 dn = DB_DNODE(db);
2776 os = dn->dn_objset;
2777
2778 if (db->db_state != DB_NOFILL) {
2779 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2780 /*
2781 * Private object buffers are released here rather
2782 * than in dbuf_dirty() since they are only modified
2783 * in the syncing context and we don't want the
2784 * overhead of making multiple copies of the data.
2785 */
2786 if (BP_IS_HOLE(db->db_blkptr)) {
2787 arc_buf_thaw(data);
2788 } else {
2789 dbuf_release_bp(db);
2790 }
2791 }
2792 }
2793
2794 if (parent != dn->dn_dbuf) {
2795 ASSERT(parent && parent->db_data_pending);
2796 ASSERT(db->db_level == parent->db_level-1);
2797 ASSERT(arc_released(parent->db_buf));
2798 zio = parent->db_data_pending->dr_zio;
2799 } else {
2800 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2801 db->db_blkid != DMU_SPILL_BLKID) ||
2802 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2803 if (db->db_blkid != DMU_SPILL_BLKID)
2804 ASSERT3P(db->db_blkptr, ==,
2805 &dn->dn_phys->dn_blkptr[db->db_blkid]);
2806 zio = dn->dn_zio;
2807 }
2808
2809 ASSERT(db->db_level == 0 || data == db->db_buf);
2810 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2811 ASSERT(zio);
2812
2813 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2814 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2815 db->db.db_object, db->db_level, db->db_blkid);
2816
2817 if (db->db_blkid == DMU_SPILL_BLKID)
2818 wp_flag = WP_SPILL;
2819 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2820
2821 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2822 DB_DNODE_EXIT(db);
2823
2824 if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2825 ASSERT(db->db_state != DB_NOFILL);
2826 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2827 db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2828 dbuf_write_override_ready, dbuf_write_override_done, dr,
2829 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2830 mutex_enter(&db->db_mtx);
2831 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2832 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2833 dr->dt.dl.dr_copies);
2834 mutex_exit(&db->db_mtx);
2835 } else if (db->db_state == DB_NOFILL) {
2836 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2837 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2838 db->db_blkptr, NULL, db->db.db_size, &zp,
2839 dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2840 ZIO_PRIORITY_ASYNC_WRITE,
2841 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2842 } else {
2843 ASSERT(arc_released(data));
2844 dr->dr_zio = arc_write(zio, os->os_spa, txg,
2845 db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
2846 dbuf_write_ready, dbuf_write_done, db,
2847 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2848 }
2849 }
2850
2851 #if defined(_KERNEL) && defined(HAVE_SPL)
2852 EXPORT_SYMBOL(dbuf_find);
2853 EXPORT_SYMBOL(dbuf_is_metadata);
2854 EXPORT_SYMBOL(dbuf_evict);
2855 EXPORT_SYMBOL(dbuf_loan_arcbuf);
2856 EXPORT_SYMBOL(dbuf_whichblock);
2857 EXPORT_SYMBOL(dbuf_read);
2858 EXPORT_SYMBOL(dbuf_unoverride);
2859 EXPORT_SYMBOL(dbuf_free_range);
2860 EXPORT_SYMBOL(dbuf_new_size);
2861 EXPORT_SYMBOL(dbuf_release_bp);
2862 EXPORT_SYMBOL(dbuf_dirty);
2863 EXPORT_SYMBOL(dmu_buf_will_dirty);
2864 EXPORT_SYMBOL(dmu_buf_will_not_fill);
2865 EXPORT_SYMBOL(dmu_buf_will_fill);
2866 EXPORT_SYMBOL(dmu_buf_fill_done);
2867 EXPORT_SYMBOL(dmu_buf_rele);
2868 EXPORT_SYMBOL(dbuf_assign_arcbuf);
2869 EXPORT_SYMBOL(dbuf_clear);
2870 EXPORT_SYMBOL(dbuf_prefetch);
2871 EXPORT_SYMBOL(dbuf_hold_impl);
2872 EXPORT_SYMBOL(dbuf_hold);
2873 EXPORT_SYMBOL(dbuf_hold_level);
2874 EXPORT_SYMBOL(dbuf_create_bonus);
2875 EXPORT_SYMBOL(dbuf_spill_set_blksz);
2876 EXPORT_SYMBOL(dbuf_rm_spill);
2877 EXPORT_SYMBOL(dbuf_add_ref);
2878 EXPORT_SYMBOL(dbuf_rele);
2879 EXPORT_SYMBOL(dbuf_rele_and_unlock);
2880 EXPORT_SYMBOL(dbuf_refcount);
2881 EXPORT_SYMBOL(dbuf_sync_list);
2882 EXPORT_SYMBOL(dmu_buf_set_user);
2883 EXPORT_SYMBOL(dmu_buf_set_user_ie);
2884 EXPORT_SYMBOL(dmu_buf_update_user);
2885 EXPORT_SYMBOL(dmu_buf_get_user);
2886 EXPORT_SYMBOL(dmu_buf_freeable);
2887 #endif