/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_dbuf.h>
#include <sys/callb.h>
#include <sys/abd.h>

struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	boolean_t dh_fail_sparse;
	boolean_t dh_fail_uncached;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, boolean_t fail_sparse,
    boolean_t fail_uncached,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
unsigned long dbuf_cache_max_bytes = 100 * 1024 * 1024;

/* Cap the size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_max_shift = 5;

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                      evicting     eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

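/*
 * Worked example of the thresholds above (for illustration, using the
 * defaults): with dbuf_cache_max_bytes at 100 MiB and both percentages
 * at 10, dbuf_cache_above_lowater() tests the cache size against
 * 100 MiB - (100 MiB * 10) / 100 = 90 MiB, and dbuf_cache_above_hiwater()
 * against 100 MiB + (100 MiB * 10) / 100 = 110 MiB. The eviction thread
 * is signalled once the cache exceeds 100 MiB; callers begin evicting
 * directly once it exceeds 110 MiB.
 */
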
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

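/*
 * Compute the hash table bucket index for a dbuf. The objset pointer,
 * object number, level and blkid are folded byte-by-byte through the
 * ZFS CRC-64 table so that the low-order bits used for bucket selection
 * are well distributed.
 */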
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

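/*
 * Look up a dbuf in the hash table. If it is found and not in the
 * process of being evicted, it is returned with its db_mtx held;
 * otherwise NULL is returned.
 */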
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

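/*
 * Return the bonus dbuf of the given object with its db_mtx held, or
 * NULL if the dnode cannot be held or has no bonus buffer. Note that
 * only the mutex is acquired; no hold is placed on the dbuf itself.
 */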
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

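/*
 * For illustration only, a minimal sketch of how a client registers the
 * callbacks that dbuf_evict_user() invokes. The names my_user_t,
 * my_evict_sync() and my_evict_async() are hypothetical; a real client
 * embeds the dmu_buf_user_t in its own per-buffer state (typically as
 * the first field, so the dbu pointer can be cast back) and registers
 * it via dmu_buf_init_user() and dmu_buf_set_user():
 *
 *	typedef struct my_user {
 *		dmu_buf_user_t mu_dbu;
 *		... per-buffer client state ...
 *	} my_user_t;
 *
 *	dmu_buf_init_user(&mu->mu_dbu, my_evict_sync, my_evict_async, NULL);
 *	(void) dmu_buf_set_user(&db->db, &mu->mu_dbu);
 *
 * The sync callback runs with db_mtx held; the async callback is
 * dispatched to dbu_evict_taskq and is the safe place to free the
 * structure containing the dbu.
 */
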
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here, is the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);
	dmu_buf_impl_t *db;
	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_sig_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;

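	/*
	 * Worked example of the sizing loop above (for illustration): on
	 * a machine with 16 GiB of physical memory and the default 8K
	 * zfs_arc_average_blocksize, hsize doubles until hsize * 8K
	 * reaches 16 GiB, i.e. hsize = 2^21 buckets. At 8 bytes per
	 * bucket pointer the table consumes 16 MiB, matching the
	 * 1MB per GB estimate in the comment above.
	 */
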
retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * Setup the parameters for the dbuf cache. We cap the size of the
	 * dbuf cache to 1/32nd (default) of the size of the ARC.
	 */
	dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
	    arc_max_bytes() >> dbuf_cache_max_shift);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_cache_link),
	    dbuf_cache_multilist_index_func);
	refcount_create(&dbuf_cache_size);

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				int i;
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

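/*
 * Worked example for dbuf_whichblock() (for illustration): with 128K
 * data blocks (datablkshift = 17) and 128K indirect blocks
 * (indblkshift = 17), an indirect block holds 2^(17 - 7) = 1024 block
 * pointers, SPA_BLKPTRSHIFT being 7 (128-byte block pointers). For
 * level = 1 this gives exp = 17 + 1 * (17 - 7) = 27, so the level 1
 * blkid for a byte offset is offset >> 27, i.e. one level 1 block per
 * 128 MiB of file data.
 */
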
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		arc_buf_destroy(buf, db);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		/*
		 * The bonus length stored in the dnode may be less than
		 * the maximum available space in the bonus buffer.
		 */
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
		arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
		if (bonuslen < max_bonuslen)
			bzero(db->db.db_data, max_bonuslen);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			int i;
			for (i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);

	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
		 * read the data. This could happen during the "zfs receive" of
		 * a stream which is compressed and deduplicated.
		 */
		if (db->db_buf != NULL &&
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) {
			dbuf_fix_old_data(db,
			    spa_syncing_txg(dmu_objset_spa(db->db_objset)));
			err = arc_decompress(db->db_buf);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		err = dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (!err && prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!err && need_wait)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	/*
	 * This assert is valid because dmu_sync() expects to be called by
	 * a zilog's get_data while holding a range lock. This call only
	 * comes from dbuf_dirty() callers who must also hold a range lock.
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

b128c09f BB |
1331 | /* |
1332 | * Evict (if its unreferenced) or clear (if its referenced) any level-0 | |
1333 | * data blocks in the free range, so that any future readers will find | |
b0bc7a84 | 1334 | * empty blocks. |
b128c09f | 1335 | */ |
34dc7c2f | 1336 | void |
8951cb8d AR |
1337 | dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, |
1338 | dmu_tx_t *tx) | |
34dc7c2f | 1339 | { |
0c66c32d JG |
1340 | dmu_buf_impl_t *db_search; |
1341 | dmu_buf_impl_t *db, *db_next; | |
34dc7c2f | 1342 | uint64_t txg = tx->tx_txg; |
8951cb8d | 1343 | avl_index_t where; |
8951cb8d | 1344 | |
9c9531cb GM |
1345 | if (end_blkid > dn->dn_maxblkid && |
1346 | !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) | |
8951cb8d AR |
1347 | end_blkid = dn->dn_maxblkid; |
1348 | dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); | |
34dc7c2f | 1349 | |
0c66c32d | 1350 | db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); |
8951cb8d AR |
1351 | db_search->db_level = 0; |
1352 | db_search->db_blkid = start_blkid; | |
9925c28c | 1353 | db_search->db_state = DB_SEARCH; |
ea97f8ce | 1354 | |
b663a23d | 1355 | mutex_enter(&dn->dn_dbufs_mtx); |
8951cb8d AR |
1356 | db = avl_find(&dn->dn_dbufs, db_search, &where); |
1357 | ASSERT3P(db, ==, NULL); | |
9c9531cb | 1358 | |
8951cb8d AR |
1359 | db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); |
1360 | ||
1361 | for (; db != NULL; db = db_next) { | |
1362 | db_next = AVL_NEXT(&dn->dn_dbufs, db); | |
428870ff | 1363 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
b128c09f | 1364 | |
8951cb8d AR |
1365 | if (db->db_level != 0 || db->db_blkid > end_blkid) { |
1366 | break; | |
1367 | } | |
1368 | ASSERT3U(db->db_blkid, >=, start_blkid); | |
34dc7c2f BB |
1369 | |
1370 | /* found a level 0 buffer in the range */ | |
13fe0198 MA |
1371 | mutex_enter(&db->db_mtx); |
1372 | if (dbuf_undirty(db, tx)) { | |
1373 | /* mutex has been dropped and dbuf destroyed */ | |
34dc7c2f | 1374 | continue; |
13fe0198 | 1375 | } |
34dc7c2f | 1376 | |
34dc7c2f | 1377 | if (db->db_state == DB_UNCACHED || |
b128c09f | 1378 | db->db_state == DB_NOFILL || |
34dc7c2f BB |
1379 | db->db_state == DB_EVICTING) { |
1380 | ASSERT(db->db.db_data == NULL); | |
1381 | mutex_exit(&db->db_mtx); | |
1382 | continue; | |
1383 | } | |
1384 | if (db->db_state == DB_READ || db->db_state == DB_FILL) { | |
1385 | /* will be handled in dbuf_read_done or dbuf_rele */ | |
1386 | db->db_freed_in_flight = TRUE; | |
1387 | mutex_exit(&db->db_mtx); | |
1388 | continue; | |
1389 | } | |
1390 | if (refcount_count(&db->db_holds) == 0) { | |
1391 | ASSERT(db->db_buf); | |
d3c2ae1c | 1392 | dbuf_destroy(db); |
34dc7c2f BB |
1393 | continue; |
1394 | } | |
1395 | /* The dbuf is referenced */ | |
1396 | ||
1397 | if (db->db_last_dirty != NULL) { | |
1398 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
1399 | ||
1400 | if (dr->dr_txg == txg) { | |
1401 | /* | |
1402 | * This buffer is "in-use", re-adjust the file | |
1403 | * size to reflect that this buffer may | |
1404 | * contain new data when we sync. | |
1405 | */ | |
428870ff BB |
1406 | if (db->db_blkid != DMU_SPILL_BLKID && |
1407 | db->db_blkid > dn->dn_maxblkid) | |
34dc7c2f BB |
1408 | dn->dn_maxblkid = db->db_blkid; |
1409 | dbuf_unoverride(dr); | |
1410 | } else { | |
1411 | /* | |
1412 | * This dbuf is not dirty in the open context. | |
1413 | * Either uncache it (if it's not referenced in |
1414 | * the open context) or reset its contents to | |
1415 | * empty. | |
1416 | */ | |
1417 | dbuf_fix_old_data(db, txg); | |
1418 | } | |
1419 | } | |
1420 | /* clear the contents if it's cached */ |
1421 | if (db->db_state == DB_CACHED) { | |
1422 | ASSERT(db->db.db_data != NULL); | |
1423 | arc_release(db->db_buf, db); | |
1424 | bzero(db->db.db_data, db->db.db_size); | |
1425 | arc_buf_freeze(db->db_buf); | |
1426 | } | |
1427 | ||
1428 | mutex_exit(&db->db_mtx); | |
1429 | } | |
8951cb8d | 1430 | |
8951cb8d | 1431 | kmem_free(db_search, sizeof (dmu_buf_impl_t)); |
34dc7c2f BB |
1432 | mutex_exit(&dn->dn_dbufs_mtx); |
1433 | } | |
1434 | ||
34dc7c2f BB |
1435 | void |
1436 | dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) | |
1437 | { | |
1438 | arc_buf_t *buf, *obuf; | |
1439 | int osize = db->db.db_size; | |
1440 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
572e2857 | 1441 | dnode_t *dn; |
34dc7c2f | 1442 | |
428870ff | 1443 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 1444 | |
572e2857 BB |
1445 | DB_DNODE_ENTER(db); |
1446 | dn = DB_DNODE(db); | |
1447 | ||
34dc7c2f | 1448 | /* XXX does *this* func really need the lock? */ |
572e2857 | 1449 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); |
34dc7c2f BB |
1450 | |
1451 | /* | |
b0bc7a84 | 1452 | * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held |
34dc7c2f BB |
1453 | * is OK, because there can be no other references to the db |
1454 | * when we are changing its size, so no concurrent DB_FILL can | |
1455 | * be happening. | |
1456 | */ | |
1457 | /* | |
1458 | * XXX we should be doing a dbuf_read, checking the return | |
1459 | * value and returning that up to our callers | |
1460 | */ | |
b0bc7a84 | 1461 | dmu_buf_will_dirty(&db->db, tx); |
34dc7c2f BB |
1462 | |
1463 | /* create the data buffer for the new block */ | |
2aa34383 | 1464 | buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); |
34dc7c2f BB |
1465 | |
1466 | /* copy old block data to the new block */ | |
1467 | obuf = db->db_buf; | |
1468 | bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); | |
1469 | /* zero the remainder */ | |
1470 | if (size > osize) | |
1471 | bzero((uint8_t *)buf->b_data + osize, size - osize); | |
1472 | ||
1473 | mutex_enter(&db->db_mtx); | |
1474 | dbuf_set_data(db, buf); | |
d3c2ae1c | 1475 | arc_buf_destroy(obuf, db); |
34dc7c2f BB |
1476 | db->db.db_size = size; |
1477 | ||
1478 | if (db->db_level == 0) { | |
1479 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
1480 | db->db_last_dirty->dt.dl.dr_data = buf; | |
1481 | } | |
1482 | mutex_exit(&db->db_mtx); | |
1483 | ||
3ec3bc21 | 1484 | dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); |
572e2857 | 1485 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
1486 | } |
1487 | ||
428870ff BB |
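| /* |
| * Release the dbuf's buffer from the ARC in syncing context, making |
| * it anonymous so the block it was read from may be overwritten. |
| */ |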
1488 | void |
1489 | dbuf_release_bp(dmu_buf_impl_t *db) | |
1490 | { | |
b0bc7a84 | 1491 | ASSERTV(objset_t *os = db->db_objset); |
428870ff BB |
1492 | |
1493 | ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); | |
1494 | ASSERT(arc_released(os->os_phys_buf) || | |
1495 | list_link_active(&os->os_dsl_dataset->ds_synced_link)); | |
1496 | ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); | |
1497 | ||
294f6806 | 1498 | (void) arc_release(db->db_buf, db); |
428870ff BB |
1499 | } |
1500 | ||
5a28a973 MA |
1501 | /* |
1502 | * We already have a dirty record for this TXG, and we are being | |
1503 | * dirtied again. | |
1504 | */ | |
1505 | static void | |
1506 | dbuf_redirty(dbuf_dirty_record_t *dr) | |
1507 | { | |
1508 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
1509 | ||
1510 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1511 | ||
1512 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { | |
1513 | /* | |
1514 | * If this buffer has already been written out, | |
1515 | * we now need to reset its state. | |
1516 | */ | |
1517 | dbuf_unoverride(dr); | |
1518 | if (db->db.db_object != DMU_META_DNODE_OBJECT && | |
1519 | db->db_state != DB_NOFILL) { | |
1520 | /* Already released on initial dirty, so just thaw. */ | |
1521 | ASSERT(arc_released(db->db_buf)); | |
1522 | arc_buf_thaw(db->db_buf); | |
1523 | } | |
1524 | } | |
1525 | } | |
1526 | ||
34dc7c2f BB |
1527 | dbuf_dirty_record_t * |
1528 | dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
1529 | { | |
572e2857 BB |
1530 | dnode_t *dn; |
1531 | objset_t *os; | |
34dc7c2f BB |
1532 | dbuf_dirty_record_t **drp, *dr; |
1533 | int drop_struct_lock = FALSE; | |
1534 | int txgoff = tx->tx_txg & TXG_MASK; | |
1535 | ||
1536 | ASSERT(tx->tx_txg != 0); | |
1537 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1538 | DMU_TX_DIRTY_BUF(tx, db); | |
1539 | ||
572e2857 BB |
1540 | DB_DNODE_ENTER(db); |
1541 | dn = DB_DNODE(db); | |
34dc7c2f BB |
1542 | /* |
1543 | * Shouldn't dirty a regular buffer in syncing context. Private | |
1544 | * objects may be dirtied in syncing context, but only if they | |
1545 | * were already pre-dirtied in open context. | |
34dc7c2f | 1546 | */ |
cc9bb3e5 GM |
1547 | #ifdef DEBUG |
1548 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1549 | rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1550 | RW_READER, FTAG); | |
1551 | } | |
34dc7c2f BB |
1552 | ASSERT(!dmu_tx_is_syncing(tx) || |
1553 | BP_IS_HOLE(dn->dn_objset->os_rootbp) || | |
9babb374 BB |
1554 | DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
1555 | dn->dn_objset->os_dsl_dataset == NULL); | |
cc9bb3e5 GM |
1556 | if (dn->dn_objset->os_dsl_dataset != NULL) |
1557 | rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
1558 | #endif | |
34dc7c2f BB |
1559 | /* |
1560 | * We make this assert for private objects as well, but after we | |
1561 | * check if we're already dirty. They are allowed to re-dirty | |
1562 | * in syncing context. | |
1563 | */ | |
1564 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
1565 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
1566 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); | |
1567 | ||
1568 | mutex_enter(&db->db_mtx); | |
1569 | /* | |
1570 | * XXX make this true for indirects too? The problem is that | |
1571 | * transactions created with dmu_tx_create_assigned() from | |
1572 | * syncing context don't bother holding ahead. | |
1573 | */ | |
1574 | ASSERT(db->db_level != 0 || | |
b128c09f BB |
1575 | db->db_state == DB_CACHED || db->db_state == DB_FILL || |
1576 | db->db_state == DB_NOFILL); | |
34dc7c2f BB |
1577 | |
1578 | mutex_enter(&dn->dn_mtx); | |
1579 | /* | |
1580 | * Don't set dirtyctx to SYNC if we're just modifying this as we | |
1581 | * initialize the objset. | |
1582 | */ | |
cc9bb3e5 GM |
1583 | if (dn->dn_dirtyctx == DN_UNDIRTIED) { |
1584 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1585 | rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1586 | RW_READER, FTAG); | |
1587 | } | |
1588 | if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { | |
1589 | dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? | |
1590 | DN_DIRTY_SYNC : DN_DIRTY_OPEN); | |
1591 | ASSERT(dn->dn_dirtyctx_firstset == NULL); | |
1592 | dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); | |
1593 | } | |
1594 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1595 | rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1596 | FTAG); | |
1597 | } | |
34dc7c2f BB |
1598 | } |
1599 | mutex_exit(&dn->dn_mtx); | |
1600 | ||
428870ff BB |
1601 | if (db->db_blkid == DMU_SPILL_BLKID) |
1602 | dn->dn_have_spill = B_TRUE; | |
1603 | ||
34dc7c2f BB |
1604 | /* |
1605 | * If this buffer is already dirty, we're done. | |
1606 | */ | |
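| /* |
| * The db_last_dirty list is sorted by txg, newest record first, |
| * so this walk stops at the first record at or before tx_txg. |
| */ |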
1607 | drp = &db->db_last_dirty; | |
1608 | ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || | |
1609 | db->db.db_object == DMU_META_DNODE_OBJECT); | |
1610 | while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) | |
1611 | drp = &dr->dr_next; | |
1612 | if (dr && dr->dr_txg == tx->tx_txg) { | |
572e2857 BB |
1613 | DB_DNODE_EXIT(db); |
1614 | ||
5a28a973 | 1615 | dbuf_redirty(dr); |
34dc7c2f BB |
1616 | mutex_exit(&db->db_mtx); |
1617 | return (dr); | |
1618 | } | |
1619 | ||
1620 | /* | |
1621 | * Only valid if not already dirty. | |
1622 | */ | |
9babb374 BB |
1623 | ASSERT(dn->dn_object == 0 || |
1624 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
34dc7c2f BB |
1625 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); |
1626 | ||
1627 | ASSERT3U(dn->dn_nlevels, >, db->db_level); | |
34dc7c2f BB |
1628 | |
1629 | /* | |
1630 | * We should only be dirtying in syncing context if it's the | |
9babb374 BB |
1631 | * mos or we're initializing the os or it's a special object. |
1632 | * However, we are allowed to dirty in syncing context provided | |
1633 | * we already dirtied it in open context. Hence we must make | |
1634 | * this assertion only if we're not already dirty. | |
34dc7c2f | 1635 | */ |
572e2857 | 1636 | os = dn->dn_objset; |
3b7f360c | 1637 | VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); |
cc9bb3e5 GM |
1638 | #ifdef DEBUG |
1639 | if (dn->dn_objset->os_dsl_dataset != NULL) | |
1640 | rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); | |
9babb374 BB |
1641 | ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
1642 | os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); | |
cc9bb3e5 GM |
1643 | if (dn->dn_objset->os_dsl_dataset != NULL) |
1644 | rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
1645 | #endif | |
34dc7c2f BB |
1646 | ASSERT(db->db.db_size != 0); |
1647 | ||
1648 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); | |
1649 | ||
428870ff | 1650 | if (db->db_blkid != DMU_BONUS_BLKID) { |
3ec3bc21 | 1651 | dmu_objset_willuse_space(os, db->db.db_size, tx); |
34dc7c2f BB |
1652 | } |
1653 | ||
1654 | /* | |
1655 | * If this buffer is dirty in an old transaction group we need | |
1656 | * to make a copy of it so that the changes we make in this | |
1657 | * transaction group won't leak out when we sync the older txg. | |
1658 | */ | |
79c76d5b | 1659 | dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); |
98f72a53 | 1660 | list_link_init(&dr->dr_dirty_node); |
34dc7c2f BB |
1661 | if (db->db_level == 0) { |
1662 | void *data_old = db->db_buf; | |
1663 | ||
b128c09f | 1664 | if (db->db_state != DB_NOFILL) { |
428870ff | 1665 | if (db->db_blkid == DMU_BONUS_BLKID) { |
b128c09f BB |
1666 | dbuf_fix_old_data(db, tx->tx_txg); |
1667 | data_old = db->db.db_data; | |
1668 | } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { | |
1669 | /* | |
1670 | * Release the data buffer from the cache so | |
1671 | * that we can modify it without impacting | |
1672 | * possible other users of this cached data | |
1673 | * block. Note that indirect blocks and | |
1674 | * private objects are not released until the | |
1675 | * syncing state (since they are only modified | |
1676 | * then). | |
1677 | */ | |
1678 | arc_release(db->db_buf, db); | |
1679 | dbuf_fix_old_data(db, tx->tx_txg); | |
1680 | data_old = db->db_buf; | |
1681 | } | |
1682 | ASSERT(data_old != NULL); | |
34dc7c2f | 1683 | } |
34dc7c2f BB |
1684 | dr->dt.dl.dr_data = data_old; |
1685 | } else { | |
448d7aaa | 1686 | mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); |
34dc7c2f BB |
1687 | list_create(&dr->dt.di.dr_children, |
1688 | sizeof (dbuf_dirty_record_t), | |
1689 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
1690 | } | |
e8b96c60 MA |
1691 | if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) |
1692 | dr->dr_accounted = db->db.db_size; | |
34dc7c2f BB |
1693 | dr->dr_dbuf = db; |
1694 | dr->dr_txg = tx->tx_txg; | |
1695 | dr->dr_next = *drp; | |
1696 | *drp = dr; | |
1697 | ||
1698 | /* | |
1699 | * We could have been freed_in_flight between the dbuf_noread | |
1700 | * and dbuf_dirty. We win, as though the dbuf_noread() had | |
1701 | * happened after the free. | |
1702 | */ | |
428870ff BB |
1703 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && |
1704 | db->db_blkid != DMU_SPILL_BLKID) { | |
34dc7c2f | 1705 | mutex_enter(&dn->dn_mtx); |
9bd274dd MA |
1706 | if (dn->dn_free_ranges[txgoff] != NULL) { |
1707 | range_tree_clear(dn->dn_free_ranges[txgoff], | |
1708 | db->db_blkid, 1); | |
1709 | } | |
34dc7c2f BB |
1710 | mutex_exit(&dn->dn_mtx); |
1711 | db->db_freed_in_flight = FALSE; | |
1712 | } | |
1713 | ||
1714 | /* | |
1715 | * This buffer is now part of this txg | |
1716 | */ | |
1717 | dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); | |
1718 | db->db_dirtycnt += 1; | |
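| /* |
| * A dbuf can be dirty in at most TXG_CONCURRENT_STATES (3) txgs |
| * at once: one open, one quiescing, and one syncing. |
| */ |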
1719 | ASSERT3U(db->db_dirtycnt, <=, 3); | |
1720 | ||
1721 | mutex_exit(&db->db_mtx); | |
1722 | ||
428870ff BB |
1723 | if (db->db_blkid == DMU_BONUS_BLKID || |
1724 | db->db_blkid == DMU_SPILL_BLKID) { | |
34dc7c2f BB |
1725 | mutex_enter(&dn->dn_mtx); |
1726 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1727 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
1728 | mutex_exit(&dn->dn_mtx); | |
1729 | dnode_setdirty(dn, tx); | |
572e2857 | 1730 | DB_DNODE_EXIT(db); |
34dc7c2f | 1731 | return (dr); |
98ace739 MA |
1732 | } |
1733 | ||
1734 | /* | |
1735 | * The dn_struct_rwlock prevents db_blkptr from changing | |
1736 | * due to a write from syncing context completing | |
1737 | * while we are running, so we want to acquire it before | |
1738 | * looking at db_blkptr. | |
1739 | */ | |
1740 | if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { | |
1741 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
1742 | drop_struct_lock = TRUE; | |
1743 | } | |
1744 | ||
2ade4a99 MA |
1745 | /* |
1746 | * We need to hold the dn_struct_rwlock to make this assertion, | |
1747 | * because it protects dn_phys / dn_next_nlevels from changing. | |
1748 | */ | |
1749 | ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || | |
1750 | dn->dn_phys->dn_nlevels > db->db_level || | |
1751 | dn->dn_next_nlevels[txgoff] > db->db_level || | |
1752 | dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || | |
1753 | dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); | |
1754 | ||
3ec3bc21 BB |
1755 | /* |
1756 | * If we are overwriting a dedup BP, then unless it is snapshotted, | |
1757 | * when we get to syncing context we will need to decrement its | |
1758 | * refcount in the DDT. Prefetch the relevant DDT block so that | |
1759 | * syncing context won't have to wait for the i/o. | |
1760 | */ | |
1761 | ddt_prefetch(os->os_spa, db->db_blkptr); | |
34dc7c2f | 1762 | |
b128c09f BB |
1763 | if (db->db_level == 0) { |
1764 | dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); | |
1765 | ASSERT(dn->dn_maxblkid >= db->db_blkid); | |
1766 | } | |
1767 | ||
34dc7c2f BB |
1768 | if (db->db_level+1 < dn->dn_nlevels) { |
1769 | dmu_buf_impl_t *parent = db->db_parent; | |
1770 | dbuf_dirty_record_t *di; | |
1771 | int parent_held = FALSE; | |
1772 | ||
1773 | if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { | |
1774 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1775 | ||
1776 | parent = dbuf_hold_level(dn, db->db_level+1, | |
1777 | db->db_blkid >> epbs, FTAG); | |
428870ff | 1778 | ASSERT(parent != NULL); |
34dc7c2f BB |
1779 | parent_held = TRUE; |
1780 | } | |
1781 | if (drop_struct_lock) | |
1782 | rw_exit(&dn->dn_struct_rwlock); | |
1783 | ASSERT3U(db->db_level+1, ==, parent->db_level); | |
1784 | di = dbuf_dirty(parent, tx); | |
1785 | if (parent_held) | |
1786 | dbuf_rele(parent, FTAG); | |
1787 | ||
1788 | mutex_enter(&db->db_mtx); | |
e8b96c60 MA |
1789 | /* |
1790 | * Since we've dropped the mutex, it's possible that | |
1791 | * dbuf_undirty() might have changed this out from under us. | |
1792 | */ | |
34dc7c2f BB |
1793 | if (db->db_last_dirty == dr || |
1794 | dn->dn_object == DMU_META_DNODE_OBJECT) { | |
1795 | mutex_enter(&di->dt.di.dr_mtx); | |
1796 | ASSERT3U(di->dr_txg, ==, tx->tx_txg); | |
1797 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1798 | list_insert_tail(&di->dt.di.dr_children, dr); | |
1799 | mutex_exit(&di->dt.di.dr_mtx); | |
1800 | dr->dr_parent = di; | |
1801 | } | |
1802 | mutex_exit(&db->db_mtx); | |
1803 | } else { | |
1804 | ASSERT(db->db_level+1 == dn->dn_nlevels); | |
1805 | ASSERT(db->db_blkid < dn->dn_nblkptr); | |
572e2857 | 1806 | ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
1807 | mutex_enter(&dn->dn_mtx); |
1808 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1809 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
1810 | mutex_exit(&dn->dn_mtx); | |
1811 | if (drop_struct_lock) | |
1812 | rw_exit(&dn->dn_struct_rwlock); | |
1813 | } | |
1814 | ||
1815 | dnode_setdirty(dn, tx); | |
572e2857 | 1816 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
1817 | return (dr); |
1818 | } | |
1819 | ||
13fe0198 | 1820 | /* |
e49f1e20 WA |
1821 | * Undirty a buffer in the transaction group referenced by the given |
1822 | * transaction. Return whether this evicted the dbuf. | |
13fe0198 MA |
1823 | */ |
1824 | static boolean_t | |
34dc7c2f BB |
1825 | dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
1826 | { | |
572e2857 | 1827 | dnode_t *dn; |
34dc7c2f BB |
1828 | uint64_t txg = tx->tx_txg; |
1829 | dbuf_dirty_record_t *dr, **drp; | |
1830 | ||
1831 | ASSERT(txg != 0); | |
4bda3bd0 MA |
1832 | |
1833 | /* | |
1834 | * Due to our use of dn_nlevels below, this can only be called | |
1835 | * in open context, unless we are operating on the MOS. | |
1836 | * From syncing context, dn_nlevels may be different from the | |
1837 | * dn_nlevels used when dbuf was dirtied. | |
1838 | */ | |
1839 | ASSERT(db->db_objset == | |
1840 | dmu_objset_pool(db->db_objset)->dp_meta_objset || | |
1841 | txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); | |
428870ff | 1842 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
13fe0198 MA |
1843 | ASSERT0(db->db_level); |
1844 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
34dc7c2f | 1845 | |
34dc7c2f BB |
1846 | /* |
1847 | * If this buffer is not dirty, we're done. | |
1848 | */ | |
1849 | for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) | |
1850 | if (dr->dr_txg <= txg) | |
1851 | break; | |
13fe0198 MA |
1852 | if (dr == NULL || dr->dr_txg < txg) |
1853 | return (B_FALSE); | |
34dc7c2f | 1854 | ASSERT(dr->dr_txg == txg); |
428870ff | 1855 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f | 1856 | |
572e2857 BB |
1857 | DB_DNODE_ENTER(db); |
1858 | dn = DB_DNODE(db); | |
1859 | ||
34dc7c2f BB |
1860 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); |
1861 | ||
1862 | ASSERT(db->db.db_size != 0); | |
1863 | ||
4bda3bd0 MA |
1864 | dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), |
1865 | dr->dr_accounted, txg); | |
34dc7c2f BB |
1866 | |
1867 | *drp = dr->dr_next; | |
1868 | ||
ef3c1dea GR |
1869 | /* |
1870 | * Note that there are three places in dbuf_dirty() | |
1871 | * where this dirty record may be put on a list. | |
1872 | * Make sure to do a list_remove corresponding to | |
1873 | * every one of those list_insert calls. | |
1874 | */ | |
34dc7c2f BB |
1875 | if (dr->dr_parent) { |
1876 | mutex_enter(&dr->dr_parent->dt.di.dr_mtx); | |
1877 | list_remove(&dr->dr_parent->dt.di.dr_children, dr); | |
1878 | mutex_exit(&dr->dr_parent->dt.di.dr_mtx); | |
ef3c1dea | 1879 | } else if (db->db_blkid == DMU_SPILL_BLKID || |
4bda3bd0 | 1880 | db->db_level + 1 == dn->dn_nlevels) { |
b128c09f | 1881 | ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
1882 | mutex_enter(&dn->dn_mtx); |
1883 | list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); | |
1884 | mutex_exit(&dn->dn_mtx); | |
1885 | } | |
572e2857 | 1886 | DB_DNODE_EXIT(db); |
34dc7c2f | 1887 | |
13fe0198 MA |
1888 | if (db->db_state != DB_NOFILL) { |
1889 | dbuf_unoverride(dr); | |
34dc7c2f | 1890 | |
34dc7c2f | 1891 | ASSERT(db->db_buf != NULL); |
13fe0198 MA |
1892 | ASSERT(dr->dt.dl.dr_data != NULL); |
1893 | if (dr->dt.dl.dr_data != db->db_buf) | |
d3c2ae1c | 1894 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
34dc7c2f | 1895 | } |
58c4aa00 | 1896 | |
34dc7c2f BB |
1897 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); |
1898 | ||
1899 | ASSERT(db->db_dirtycnt > 0); | |
1900 | db->db_dirtycnt -= 1; | |
1901 | ||
1902 | if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { | |
d3c2ae1c GW |
1903 | ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); |
1904 | dbuf_destroy(db); | |
13fe0198 | 1905 | return (B_TRUE); |
34dc7c2f BB |
1906 | } |
1907 | ||
13fe0198 | 1908 | return (B_FALSE); |
34dc7c2f BB |
1909 | } |
1910 | ||
34dc7c2f | 1911 | void |
b0bc7a84 | 1912 | dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) |
34dc7c2f | 1913 | { |
b0bc7a84 | 1914 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
34dc7c2f | 1915 | int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; |
5a28a973 | 1916 | dbuf_dirty_record_t *dr; |
34dc7c2f BB |
1917 | |
1918 | ASSERT(tx->tx_txg != 0); | |
1919 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1920 | ||
5a28a973 MA |
1921 | /* |
1922 | * Quick check for dirtiness. For already dirty blocks, this |
1923 | * reduces runtime of this function by >90%, and overall performance | |
1924 | * by 50% for some workloads (e.g. file deletion with indirect blocks | |
1925 | * cached). | |
1926 | */ | |
1927 | mutex_enter(&db->db_mtx); | |
1928 | ||
1929 | for (dr = db->db_last_dirty; | |
1930 | dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { | |
1931 | /* | |
1932 | * It's possible that it is already dirty but not cached, | |
1933 | * because there are some calls to dbuf_dirty() that don't | |
1934 | * go through dmu_buf_will_dirty(). | |
1935 | */ | |
1936 | if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { | |
1937 | /* This dbuf is already dirty and cached. */ | |
1938 | dbuf_redirty(dr); | |
1939 | mutex_exit(&db->db_mtx); | |
1940 | return; | |
1941 | } | |
1942 | } | |
1943 | mutex_exit(&db->db_mtx); | |
1944 | ||
572e2857 BB |
1945 | DB_DNODE_ENTER(db); |
1946 | if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) | |
34dc7c2f | 1947 | rf |= DB_RF_HAVESTRUCT; |
572e2857 | 1948 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
1949 | (void) dbuf_read(db, NULL, rf); |
1950 | (void) dbuf_dirty(db, tx); | |
1951 | } | |
1952 | ||
b128c09f BB |
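| /* |
| * The caller will overwrite this block in its entirety without |
| * reading it first, so mark the dbuf DB_NOFILL and skip attaching |
| * an ARC buffer for it. |
| */ |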
1953 | void |
1954 | dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
1955 | { | |
1956 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
1957 | ||
1958 | db->db_state = DB_NOFILL; | |
1959 | ||
1960 | dmu_buf_will_fill(db_fake, tx); | |
1961 | } | |
1962 | ||
34dc7c2f BB |
1963 | void |
1964 | dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
1965 | { | |
1966 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
1967 | ||
428870ff | 1968 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
1969 | ASSERT(tx->tx_txg != 0); |
1970 | ASSERT(db->db_level == 0); | |
1971 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1972 | ||
1973 | ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || | |
1974 | dmu_tx_private_ok(tx)); | |
1975 | ||
1976 | dbuf_noread(db); | |
1977 | (void) dbuf_dirty(db, tx); | |
1978 | } | |
1979 | ||
1980 | #pragma weak dmu_buf_fill_done = dbuf_fill_done | |
1981 | /* ARGSUSED */ | |
1982 | void | |
1983 | dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
1984 | { | |
1985 | mutex_enter(&db->db_mtx); | |
1986 | DBUF_VERIFY(db); | |
1987 | ||
1988 | if (db->db_state == DB_FILL) { | |
1989 | if (db->db_level == 0 && db->db_freed_in_flight) { | |
428870ff | 1990 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
1991 | /* we were freed while filling */ |
1992 | /* XXX dbuf_undirty? */ | |
1993 | bzero(db->db.db_data, db->db.db_size); | |
1994 | db->db_freed_in_flight = FALSE; | |
1995 | } | |
1996 | db->db_state = DB_CACHED; | |
1997 | cv_broadcast(&db->db_changed); | |
1998 | } | |
1999 | mutex_exit(&db->db_mtx); | |
2000 | } | |
2001 | ||
9b67f605 MA |
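| /* |
| * Record this dbuf's new contents as an "embedded" block pointer: the |
| * (compressed) data is stored in the override bp itself, so no block |
| * needs to be allocated on disk when the dbuf syncs out. |
| */ |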
2002 | void |
2003 | dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, | |
2004 | bp_embedded_type_t etype, enum zio_compress comp, | |
2005 | int uncompressed_size, int compressed_size, int byteorder, | |
2006 | dmu_tx_t *tx) | |
2007 | { | |
2008 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
2009 | struct dirty_leaf *dl; | |
2010 | dmu_object_type_t type; | |
2011 | ||
241b5415 MA |
2012 | if (etype == BP_EMBEDDED_TYPE_DATA) { |
2013 | ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), | |
2014 | SPA_FEATURE_EMBEDDED_DATA)); | |
2015 | } | |
2016 | ||
9b67f605 MA |
2017 | DB_DNODE_ENTER(db); |
2018 | type = DB_DNODE(db)->dn_type; | |
2019 | DB_DNODE_EXIT(db); | |
2020 | ||
2021 | ASSERT0(db->db_level); | |
2022 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2023 | ||
2024 | dmu_buf_will_not_fill(dbuf, tx); | |
2025 | ||
2026 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
2027 | dl = &db->db_last_dirty->dt.dl; | |
2028 | encode_embedded_bp_compressed(&dl->dr_overridden_by, | |
2029 | data, comp, uncompressed_size, compressed_size); | |
2030 | BPE_SET_ETYPE(&dl->dr_overridden_by, etype); | |
2031 | BP_SET_TYPE(&dl->dr_overridden_by, type); | |
2032 | BP_SET_LEVEL(&dl->dr_overridden_by, 0); | |
2033 | BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); | |
2034 | ||
2035 | dl->dr_override_state = DR_OVERRIDDEN; | |
2036 | dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; | |
2037 | } | |
2038 | ||
9babb374 BB |
2039 | /* |
2040 | * Directly assign a provided arc buf to a given dbuf if it's not referenced | |
2041 | * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. | |
2042 | */ | |
2043 | void | |
2044 | dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) | |
2045 | { | |
2046 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
428870ff | 2047 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
9babb374 | 2048 | ASSERT(db->db_level == 0); |
2aa34383 | 2049 | ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); |
9babb374 | 2050 | ASSERT(buf != NULL); |
2aa34383 | 2051 | ASSERT(arc_buf_lsize(buf) == db->db.db_size); |
9babb374 BB |
2052 | ASSERT(tx->tx_txg != 0); |
2053 | ||
2054 | arc_return_buf(buf, db); | |
2055 | ASSERT(arc_released(buf)); | |
2056 | ||
2057 | mutex_enter(&db->db_mtx); | |
2058 | ||
2059 | while (db->db_state == DB_READ || db->db_state == DB_FILL) | |
2060 | cv_wait(&db->db_changed, &db->db_mtx); | |
2061 | ||
2062 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); | |
2063 | ||
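| /* |
| * Holds beyond our caller's and those taken by dirty records mean |
| * the buffer is shared with other users, so copy arcbuf's contents |
| * rather than assigning it. |
| */ |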
2064 | if (db->db_state == DB_CACHED && | |
2065 | refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { | |
2066 | mutex_exit(&db->db_mtx); | |
2067 | (void) dbuf_dirty(db, tx); | |
2068 | bcopy(buf->b_data, db->db.db_data, db->db.db_size); | |
d3c2ae1c | 2069 | arc_buf_destroy(buf, db); |
428870ff | 2070 | xuio_stat_wbuf_copied(); |
9babb374 BB |
2071 | return; |
2072 | } | |
2073 | ||
428870ff | 2074 | xuio_stat_wbuf_nocopy(); |
9babb374 BB |
2075 | if (db->db_state == DB_CACHED) { |
2076 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
2077 | ||
2078 | ASSERT(db->db_buf != NULL); | |
2079 | if (dr != NULL && dr->dr_txg == tx->tx_txg) { | |
2080 | ASSERT(dr->dt.dl.dr_data == db->db_buf); | |
2081 | if (!arc_released(db->db_buf)) { | |
2082 | ASSERT(dr->dt.dl.dr_override_state == | |
2083 | DR_OVERRIDDEN); | |
2084 | arc_release(db->db_buf, db); | |
2085 | } | |
2086 | dr->dt.dl.dr_data = buf; | |
d3c2ae1c | 2087 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2088 | } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { |
2089 | arc_release(db->db_buf, db); | |
d3c2ae1c | 2090 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2091 | } |
2092 | db->db_buf = NULL; | |
2093 | } | |
2094 | ASSERT(db->db_buf == NULL); | |
2095 | dbuf_set_data(db, buf); | |
2096 | db->db_state = DB_FILL; | |
2097 | mutex_exit(&db->db_mtx); | |
2098 | (void) dbuf_dirty(db, tx); | |
b0bc7a84 | 2099 | dmu_buf_fill_done(&db->db, tx); |
9babb374 BB |
2100 | } |
2101 | ||
34dc7c2f | 2102 | void |
d3c2ae1c | 2103 | dbuf_destroy(dmu_buf_impl_t *db) |
34dc7c2f | 2104 | { |
572e2857 | 2105 | dnode_t *dn; |
34dc7c2f | 2106 | dmu_buf_impl_t *parent = db->db_parent; |
572e2857 | 2107 | dmu_buf_impl_t *dndb; |
34dc7c2f BB |
2108 | |
2109 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
2110 | ASSERT(refcount_is_zero(&db->db_holds)); | |
2111 | ||
d3c2ae1c GW |
2112 | if (db->db_buf != NULL) { |
2113 | arc_buf_destroy(db->db_buf, db); | |
2114 | db->db_buf = NULL; | |
2115 | } | |
34dc7c2f | 2116 | |
d3c2ae1c GW |
2117 | if (db->db_blkid == DMU_BONUS_BLKID) { |
2118 | int slots = DB_DNODE(db)->dn_num_slots; | |
2119 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
34dc7c2f | 2120 | ASSERT(db->db.db_data != NULL); |
a3fd9d9e | 2121 | kmem_free(db->db.db_data, bonuslen); |
d3c2ae1c | 2122 | arc_space_return(bonuslen, ARC_SPACE_BONUS); |
34dc7c2f BB |
2123 | db->db_state = DB_UNCACHED; |
2124 | } | |
2125 | ||
d3c2ae1c GW |
2126 | dbuf_clear_data(db); |
2127 | ||
2128 | if (multilist_link_active(&db->db_cache_link)) { | |
64fc7762 | 2129 | multilist_remove(dbuf_cache, db); |
d3c2ae1c GW |
2130 | (void) refcount_remove_many(&dbuf_cache_size, |
2131 | db->db.db_size, db); | |
2132 | } | |
2133 | ||
b128c09f | 2134 | ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); |
34dc7c2f BB |
2135 | ASSERT(db->db_data_pending == NULL); |
2136 | ||
2137 | db->db_state = DB_EVICTING; | |
2138 | db->db_blkptr = NULL; | |
2139 | ||
d3c2ae1c GW |
2140 | /* |
2141 | * Now that db_state is DB_EVICTING, nobody else can find this via | |
2142 | * the hash table. We can now drop db_mtx, which allows us to | |
2143 | * acquire the dn_dbufs_mtx. | |
2144 | */ | |
2145 | mutex_exit(&db->db_mtx); | |
2146 | ||
572e2857 BB |
2147 | DB_DNODE_ENTER(db); |
2148 | dn = DB_DNODE(db); | |
2149 | dndb = dn->dn_dbuf; | |
d3c2ae1c GW |
2150 | if (db->db_blkid != DMU_BONUS_BLKID) { |
2151 | boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); | |
2152 | if (needlock) | |
2153 | mutex_enter(&dn->dn_dbufs_mtx); | |
8951cb8d | 2154 | avl_remove(&dn->dn_dbufs, db); |
73ad4a9f | 2155 | atomic_dec_32(&dn->dn_dbufs_count); |
572e2857 BB |
2156 | membar_producer(); |
2157 | DB_DNODE_EXIT(db); | |
d3c2ae1c GW |
2158 | if (needlock) |
2159 | mutex_exit(&dn->dn_dbufs_mtx); | |
572e2857 BB |
2160 | /* |
2161 | * Decrementing the dbuf count means that the hold corresponding | |
2162 | * to the removed dbuf is no longer discounted in dnode_move(), | |
2163 | * so the dnode cannot be moved until after we release the hold. | |
2164 | * The membar_producer() ensures visibility of the decremented | |
2165 | * value in dnode_move(), since DB_DNODE_EXIT doesn't actually | |
2166 | * release any lock. | |
2167 | */ | |
34dc7c2f | 2168 | dnode_rele(dn, db); |
572e2857 | 2169 | db->db_dnode_handle = NULL; |
d3c2ae1c GW |
2170 | |
2171 | dbuf_hash_remove(db); | |
572e2857 BB |
2172 | } else { |
2173 | DB_DNODE_EXIT(db); | |
34dc7c2f BB |
2174 | } |
2175 | ||
d3c2ae1c | 2176 | ASSERT(refcount_is_zero(&db->db_holds)); |
34dc7c2f | 2177 | |
d3c2ae1c GW |
2178 | db->db_parent = NULL; |
2179 | ||
2180 | ASSERT(db->db_buf == NULL); | |
2181 | ASSERT(db->db.db_data == NULL); | |
2182 | ASSERT(db->db_hash_next == NULL); | |
2183 | ASSERT(db->db_blkptr == NULL); | |
2184 | ASSERT(db->db_data_pending == NULL); | |
2185 | ASSERT(!multilist_link_active(&db->db_cache_link)); | |
2186 | ||
2187 | kmem_cache_free(dbuf_kmem_cache, db); | |
2188 | arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
34dc7c2f BB |
2189 | |
2190 | /* | |
572e2857 | 2191 | * If this dbuf is referenced from an indirect dbuf, |
34dc7c2f BB |
2192 | * decrement the ref count on the indirect dbuf. |
2193 | */ | |
2194 | if (parent && parent != dndb) | |
2195 | dbuf_rele(parent, db); | |
2196 | } | |
2197 | ||
fcff0f35 PD |
2198 | /* |
2199 | * Note: While bpp will always be updated if the function returns success, | |
2200 | * parentp will not be updated if the dnode does not have dn_dbuf filled in; | |
2201 | * this happens when the dnode is the meta-dnode, or a userused or groupused | |
2202 | * object. | |
2203 | */ | |
bf701a83 BB |
2204 | __attribute__((always_inline)) |
2205 | static inline int | |
34dc7c2f | 2206 | dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, |
fc5bb51f | 2207 | dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh) |
34dc7c2f BB |
2208 | { |
2209 | int nlevels, epbs; | |
2210 | ||
2211 | *parentp = NULL; | |
2212 | *bpp = NULL; | |
2213 | ||
428870ff BB |
2214 | ASSERT(blkid != DMU_BONUS_BLKID); |
2215 | ||
2216 | if (blkid == DMU_SPILL_BLKID) { | |
2217 | mutex_enter(&dn->dn_mtx); | |
2218 | if (dn->dn_have_spill && | |
2219 | (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
50c957f7 | 2220 | *bpp = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
2221 | else |
2222 | *bpp = NULL; | |
2223 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2224 | *parentp = dn->dn_dbuf; | |
2225 | mutex_exit(&dn->dn_mtx); | |
2226 | return (0); | |
2227 | } | |
34dc7c2f | 2228 | |
32d41fb7 PD |
2229 | nlevels = |
2230 | (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; | |
34dc7c2f BB |
2231 | epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; |
2232 | ||
2233 | ASSERT3U(level * epbs, <, 64); | |
2234 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
32d41fb7 PD |
2235 | /* |
2236 | * This assertion shouldn't trip as long as the max indirect block size | |
2237 | * is less than 1M. The reason for this is that up to that point, | |
2238 | * the number of levels required to address an entire object with blocks | |
2239 | * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In | |
2240 | * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 | |
2241 | * (i.e. we can address the entire object), objects will all use at most | |
2242 | * N-1 levels and the assertion won't overflow. However, once epbs is | |
2243 | * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be | |
2244 | * enough to address an entire object, so objects will have 5 levels, | |
2245 | * but then this assertion will overflow. | |
2246 | * | |
2247 | * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we | |
2248 | * need to redo this logic to handle overflows. | |
2249 | */ | |
2250 | ASSERT(level >= nlevels || | |
2251 | ((nlevels - level - 1) * epbs) + | |
2252 | highbit64(dn->dn_phys->dn_nblkptr) <= 64); | |
34dc7c2f | 2253 | if (level >= nlevels || |
32d41fb7 PD |
2254 | blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << |
2255 | ((nlevels - level - 1) * epbs)) || | |
2256 | (fail_sparse && | |
2257 | blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { | |
34dc7c2f | 2258 | /* the buffer has no parent yet */ |
2e528b49 | 2259 | return (SET_ERROR(ENOENT)); |
34dc7c2f BB |
2260 | } else if (level < nlevels-1) { |
2261 | /* this block is referenced from an indirect block */ | |
fc5bb51f BB |
2262 | int err; |
2263 | if (dh == NULL) { | |
fcff0f35 PD |
2264 | err = dbuf_hold_impl(dn, level+1, |
2265 | blkid >> epbs, fail_sparse, FALSE, NULL, parentp); | |
d1d7e268 | 2266 | } else { |
fc5bb51f | 2267 | __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, |
fcff0f35 PD |
2268 | blkid >> epbs, fail_sparse, FALSE, NULL, |
2269 | parentp, dh->dh_depth + 1); | |
fc5bb51f BB |
2270 | err = __dbuf_hold_impl(dh + 1); |
2271 | } | |
34dc7c2f BB |
2272 | if (err) |
2273 | return (err); | |
2274 | err = dbuf_read(*parentp, NULL, | |
2275 | (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); | |
2276 | if (err) { | |
2277 | dbuf_rele(*parentp, NULL); | |
2278 | *parentp = NULL; | |
2279 | return (err); | |
2280 | } | |
2281 | *bpp = ((blkptr_t *)(*parentp)->db.db_data) + | |
2282 | (blkid & ((1ULL << epbs) - 1)); | |
32d41fb7 PD |
2283 | if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) |
2284 | ASSERT(BP_IS_HOLE(*bpp)); | |
34dc7c2f BB |
2285 | return (0); |
2286 | } else { | |
2287 | /* the block is referenced from the dnode */ | |
2288 | ASSERT3U(level, ==, nlevels-1); | |
2289 | ASSERT(dn->dn_phys->dn_nblkptr == 0 || | |
2290 | blkid < dn->dn_phys->dn_nblkptr); | |
2291 | if (dn->dn_dbuf) { | |
2292 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2293 | *parentp = dn->dn_dbuf; | |
2294 | } | |
2295 | *bpp = &dn->dn_phys->dn_blkptr[blkid]; | |
2296 | return (0); | |
2297 | } | |
2298 | } | |
2299 | ||
2300 | static dmu_buf_impl_t * | |
2301 | dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, | |
2302 | dmu_buf_impl_t *parent, blkptr_t *blkptr) | |
2303 | { | |
428870ff | 2304 | objset_t *os = dn->dn_objset; |
34dc7c2f BB |
2305 | dmu_buf_impl_t *db, *odb; |
2306 | ||
2307 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
2308 | ASSERT(dn->dn_type != DMU_OT_NONE); | |
2309 | ||
d3c2ae1c | 2310 | db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); |
34dc7c2f BB |
2311 | |
2312 | db->db_objset = os; | |
2313 | db->db.db_object = dn->dn_object; | |
2314 | db->db_level = level; | |
2315 | db->db_blkid = blkid; | |
2316 | db->db_last_dirty = NULL; | |
2317 | db->db_dirtycnt = 0; | |
572e2857 | 2318 | db->db_dnode_handle = dn->dn_handle; |
34dc7c2f BB |
2319 | db->db_parent = parent; |
2320 | db->db_blkptr = blkptr; | |
2321 | ||
0c66c32d | 2322 | db->db_user = NULL; |
bc4501f7 JG |
2323 | db->db_user_immediate_evict = FALSE; |
2324 | db->db_freed_in_flight = FALSE; | |
2325 | db->db_pending_evict = FALSE; | |
34dc7c2f | 2326 | |
428870ff | 2327 | if (blkid == DMU_BONUS_BLKID) { |
34dc7c2f | 2328 | ASSERT3P(parent, ==, dn->dn_dbuf); |
50c957f7 | 2329 | db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - |
34dc7c2f BB |
2330 | (dn->dn_nblkptr-1) * sizeof (blkptr_t); |
2331 | ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); | |
428870ff | 2332 | db->db.db_offset = DMU_BONUS_BLKID; |
34dc7c2f BB |
2333 | db->db_state = DB_UNCACHED; |
2334 | /* the bonus dbuf is not placed in the hash table */ | |
25458cbe | 2335 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f | 2336 | return (db); |
428870ff BB |
2337 | } else if (blkid == DMU_SPILL_BLKID) { |
2338 | db->db.db_size = (blkptr != NULL) ? | |
2339 | BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; | |
2340 | db->db.db_offset = 0; | |
34dc7c2f BB |
2341 | } else { |
2342 | int blocksize = | |
e8b96c60 | 2343 | db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; |
34dc7c2f BB |
2344 | db->db.db_size = blocksize; |
2345 | db->db.db_offset = db->db_blkid * blocksize; | |
2346 | } | |
2347 | ||
2348 | /* | |
2349 | * Hold the dn_dbufs_mtx while we insert the new dbuf |
2350 | * into the hash table *and* add it to the dbufs list. |
2351 | * This prevents a possible deadlock with someone |
2352 | * trying to look up this dbuf before it's added to the |
2353 | * dn_dbufs list. |
2354 | */ | |
2355 | mutex_enter(&dn->dn_dbufs_mtx); | |
2356 | db->db_state = DB_EVICTING; | |
2357 | if ((odb = dbuf_hash_insert(db)) != NULL) { | |
2358 | /* someone else inserted it first */ | |
d3c2ae1c | 2359 | kmem_cache_free(dbuf_kmem_cache, db); |
34dc7c2f BB |
2360 | mutex_exit(&dn->dn_dbufs_mtx); |
2361 | return (odb); | |
2362 | } | |
8951cb8d | 2363 | avl_add(&dn->dn_dbufs, db); |
9c9531cb | 2364 | |
34dc7c2f BB |
2365 | db->db_state = DB_UNCACHED; |
2366 | mutex_exit(&dn->dn_dbufs_mtx); | |
25458cbe | 2367 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f BB |
2368 | |
2369 | if (parent && parent != dn->dn_dbuf) | |
2370 | dbuf_add_ref(parent, db); | |
2371 | ||
2372 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
2373 | refcount_count(&dn->dn_holds) > 0); | |
2374 | (void) refcount_add(&dn->dn_holds, db); | |
73ad4a9f | 2375 | atomic_inc_32(&dn->dn_dbufs_count); |
34dc7c2f BB |
2376 | |
2377 | dprintf_dbuf(db, "db=%p\n", db); | |
2378 | ||
2379 | return (db); | |
2380 | } | |
2381 | ||
fcff0f35 PD |
2382 | typedef struct dbuf_prefetch_arg { |
2383 | spa_t *dpa_spa; /* The spa to issue the prefetch in. */ | |
2384 | zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ | |
2385 | int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ | |
2386 | int dpa_curlevel; /* The current level that we're reading */ | |
d3c2ae1c | 2387 | dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ |
fcff0f35 PD |
2388 | zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ |
2389 | zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ | |
2390 | arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ | |
2391 | } dbuf_prefetch_arg_t; | |
2392 | ||
2393 | /* | |
2394 | * Actually issue the prefetch read for the block given. | |
2395 | */ | |
2396 | static void | |
2397 | dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) | |
2398 | { | |
2399 | arc_flags_t aflags; | |
2400 | if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) | |
2401 | return; | |
2402 | ||
2403 | aflags = dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; | |
2404 | ||
2405 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2406 | ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); | |
2407 | ASSERT(dpa->dpa_zio != NULL); | |
2408 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, | |
2409 | dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2410 | &aflags, &dpa->dpa_zb); | |
2411 | } | |
2412 | ||
2413 | /* | |
2414 | * Called when an indirect block above our prefetch target is read in. This | |
2415 | * will either read in the next indirect block down the tree or issue the actual | |
2416 | * prefetch if the next block down is our target. | |
2417 | */ | |
2418 | static void | |
2419 | dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) | |
2420 | { | |
2421 | dbuf_prefetch_arg_t *dpa = private; | |
2422 | uint64_t nextblkid; | |
2423 | blkptr_t *bp; | |
2424 | ||
2425 | ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); | |
2426 | ASSERT3S(dpa->dpa_curlevel, >, 0); | |
d3c2ae1c GW |
2427 | |
2428 | /* | |
2429 | * The dpa_dnode is only valid if we are called with a NULL | |
2430 | * zio. This indicates that the arc_read() returned without | |
2431 | * first calling zio_read() to issue a physical read. Once | |
2432 | * a physical read is made the dpa_dnode must be invalidated | |
2433 | * as the locks guarding it may have been dropped. If the | |
2434 | * dpa_dnode is still valid, then we want to add it to the dbuf | |
2435 | * cache. To do so, we must hold the dbuf associated with the block | |
2436 | * we just prefetched, read its contents so that we associate it | |
2437 | * with an arc_buf_t, and then release it. | |
2438 | */ | |
fcff0f35 PD |
2439 | if (zio != NULL) { |
2440 | ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); | |
d3c2ae1c GW |
2441 | if (zio->io_flags & ZIO_FLAG_RAW) { |
2442 | ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); | |
2443 | } else { | |
2444 | ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); | |
2445 | } | |
fcff0f35 | 2446 | ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); |
d3c2ae1c GW |
2447 | |
2448 | dpa->dpa_dnode = NULL; | |
2449 | } else if (dpa->dpa_dnode != NULL) { | |
2450 | uint64_t curblkid = dpa->dpa_zb.zb_blkid >> | |
2451 | (dpa->dpa_epbs * (dpa->dpa_curlevel - | |
2452 | dpa->dpa_zb.zb_level)); | |
2453 | dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, | |
2454 | dpa->dpa_curlevel, curblkid, FTAG); | |
2455 | (void) dbuf_read(db, NULL, | |
2456 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); | |
2457 | dbuf_rele(db, FTAG); | |
fcff0f35 PD |
2458 | } |
2459 | ||
2460 | dpa->dpa_curlevel--; | |
2461 | ||
2462 | nextblkid = dpa->dpa_zb.zb_blkid >> | |
2463 | (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); | |
2464 | bp = ((blkptr_t *)abuf->b_data) + | |
2465 | P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); | |
2466 | if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { | |
2467 | kmem_free(dpa, sizeof (*dpa)); | |
2468 | } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { | |
2469 | ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); | |
2470 | dbuf_issue_final_prefetch(dpa, bp); | |
2471 | kmem_free(dpa, sizeof (*dpa)); | |
2472 | } else { | |
2473 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2474 | zbookmark_phys_t zb; | |
2475 | ||
2476 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2477 | ||
2478 | SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, | |
2479 | dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); | |
2480 | ||
2481 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2482 | bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, | |
2483 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2484 | &iter_aflags, &zb); | |
2485 | } | |
d3c2ae1c GW |
2486 | |
2487 | arc_buf_destroy(abuf, private); | |
fcff0f35 PD |
2488 | } |
2489 | ||
2490 | /* | |
2491 | * Issue prefetch reads for the given block on the given level. If the indirect | |
2492 | * blocks above that block are not in memory, we will read them in | |
2493 | * asynchronously. As a result, this call never blocks waiting for a read to | |
2494 | * complete. | |
2495 | */ | |
34dc7c2f | 2496 | void |
fcff0f35 PD |
2497 | dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, |
2498 | arc_flags_t aflags) | |
34dc7c2f | 2499 | { |
fcff0f35 PD |
2500 | blkptr_t bp; |
2501 | int epbs, nlevels, curlevel; | |
2502 | uint64_t curblkid; | |
2503 | dmu_buf_impl_t *db; | |
2504 | zio_t *pio; | |
2505 | dbuf_prefetch_arg_t *dpa; | |
2506 | dsl_dataset_t *ds; | |
34dc7c2f | 2507 | |
428870ff | 2508 | ASSERT(blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2509 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
2510 | ||
7f60329a MA |
2511 | if (blkid > dn->dn_maxblkid) |
2512 | return; | |
2513 | ||
34dc7c2f BB |
2514 | if (dnode_block_freed(dn, blkid)) |
2515 | return; | |
2516 | ||
fcff0f35 PD |
2517 | /* |
2518 | * This dnode hasn't been written to disk yet, so there's nothing to | |
2519 | * prefetch. | |
2520 | */ | |
2521 | nlevels = dn->dn_phys->dn_nlevels; | |
2522 | if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) | |
2523 | return; | |
2524 | ||
2525 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2526 | if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) | |
2527 | return; | |
2528 | ||
2529 | db = dbuf_find(dn->dn_objset, dn->dn_object, | |
2530 | level, blkid); | |
2531 | if (db != NULL) { | |
2532 | mutex_exit(&db->db_mtx); | |
572e2857 | 2533 | /* |
fcff0f35 PD |
2534 | * This dbuf already exists. It is either CACHED, or |
2535 | * (we assume) about to be read or filled. | |
572e2857 | 2536 | */ |
572e2857 | 2537 | return; |
34dc7c2f BB |
2538 | } |
2539 | ||
fcff0f35 PD |
2540 | /* |
2541 | * Find the closest ancestor (indirect block) of the target block | |
2542 | * that is present in the cache. In this indirect block, we will | |
2543 | * find the bp that is at curlevel, curblkid. | |
2544 | */ | |
2545 | curlevel = level; | |
2546 | curblkid = blkid; | |
2547 | while (curlevel < nlevels - 1) { | |
2548 | int parent_level = curlevel + 1; | |
2549 | uint64_t parent_blkid = curblkid >> epbs; | |
2550 | dmu_buf_impl_t *db; | |
2551 | ||
2552 | if (dbuf_hold_impl(dn, parent_level, parent_blkid, | |
2553 | FALSE, TRUE, FTAG, &db) == 0) { | |
2554 | blkptr_t *bpp = db->db_buf->b_data; | |
2555 | bp = bpp[P2PHASE(curblkid, 1 << epbs)]; | |
2556 | dbuf_rele(db, FTAG); | |
2557 | break; | |
2558 | } | |
428870ff | 2559 | |
fcff0f35 PD |
2560 | curlevel = parent_level; |
2561 | curblkid = parent_blkid; | |
2562 | } | |
34dc7c2f | 2563 | |
fcff0f35 PD |
2564 | if (curlevel == nlevels - 1) { |
2565 | /* No cached indirect blocks found. */ | |
2566 | ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); | |
2567 | bp = dn->dn_phys->dn_blkptr[curblkid]; | |
34dc7c2f | 2568 | } |
fcff0f35 PD |
2569 | if (BP_IS_HOLE(&bp)) |
2570 | return; | |
2571 | ||
2572 | ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); | |
2573 | ||
2574 | pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, | |
2575 | ZIO_FLAG_CANFAIL); | |
2576 | ||
2577 | dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); | |
2578 | ds = dn->dn_objset->os_dsl_dataset; | |
2579 | SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2580 | dn->dn_object, level, blkid); | |
2581 | dpa->dpa_curlevel = curlevel; | |
2582 | dpa->dpa_prio = prio; | |
2583 | dpa->dpa_aflags = aflags; | |
2584 | dpa->dpa_spa = dn->dn_objset->os_spa; | |
d3c2ae1c | 2585 | dpa->dpa_dnode = dn; |
fcff0f35 PD |
2586 | dpa->dpa_epbs = epbs; |
2587 | dpa->dpa_zio = pio; | |
2588 | ||
2589 | /* | |
2590 | * If we have the indirect just above us, no need to do the asynchronous | |
2591 | * prefetch chain; we'll just run the last step ourselves. If we're at | |
2592 | * a higher level, though, we want to issue the prefetches for all the | |
2593 | * indirect blocks asynchronously, so we can go on with whatever we were | |
2594 | * doing. | |
2595 | */ | |
2596 | if (curlevel == level) { | |
2597 | ASSERT3U(curblkid, ==, blkid); | |
2598 | dbuf_issue_final_prefetch(dpa, &bp); | |
2599 | kmem_free(dpa, sizeof (*dpa)); | |
2600 | } else { | |
2601 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2602 | zbookmark_phys_t zb; | |
2603 | ||
2604 | SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2605 | dn->dn_object, curlevel, curblkid); | |
2606 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2607 | &bp, dbuf_prefetch_indirect_done, dpa, prio, | |
2608 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2609 | &iter_aflags, &zb); | |
2610 | } | |
2611 | /* | |
2612 | * We use pio here instead of dpa_zio since it's possible that | |
2613 | * dpa may have already been freed. | |
2614 | */ | |
2615 | zio_nowait(pio); | |
34dc7c2f BB |
2616 | } |
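| /* |
| * A minimal usage sketch (hypothetical caller, not taken from this |
| * file): asynchronously warm the ARC with a level-0 block while |
| * holding the dnode's struct lock, as the ASSERT above requires: |
| * |
| *	rw_enter(&dn->dn_struct_rwlock, RW_READER); |
| *	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0); |
| *	rw_exit(&dn->dn_struct_rwlock); |
| */ |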
2617 | ||
d1d7e268 | 2618 | #define DBUF_HOLD_IMPL_MAX_DEPTH 20 |
fc5bb51f | 2619 | |
34dc7c2f BB |
2620 | /* |
2621 | * Returns with db_holds incremented, and db_mtx not held. | |
2622 | * Note: dn_struct_rwlock must be held. | |
2623 | */ | |
fc5bb51f BB |
2624 | static int |
2625 | __dbuf_hold_impl(struct dbuf_hold_impl_data *dh) | |
34dc7c2f | 2626 | { |
fc5bb51f BB |
2627 | ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH); |
2628 | dh->dh_parent = NULL; | |
34dc7c2f | 2629 | |
fc5bb51f BB |
2630 | ASSERT(dh->dh_blkid != DMU_BONUS_BLKID); |
2631 | ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock)); | |
2632 | ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level); | |
34dc7c2f | 2633 | |
fc5bb51f | 2634 | *(dh->dh_dbp) = NULL; |
d3c2ae1c | 2635 | |
34dc7c2f | 2636 | /* dbuf_find() returns with db_mtx held */ |
6ebebace JG |
2637 | dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object, |
2638 | dh->dh_level, dh->dh_blkid); | |
fc5bb51f BB |
2639 | |
2640 | if (dh->dh_db == NULL) { | |
2641 | dh->dh_bp = NULL; | |
2642 | ||
fcff0f35 PD |
2643 | if (dh->dh_fail_uncached) |
2644 | return (SET_ERROR(ENOENT)); | |
2645 | ||
fc5bb51f BB |
2646 | ASSERT3P(dh->dh_parent, ==, NULL); |
2647 | dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
02730c33 | 2648 | dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh); |
fc5bb51f | 2649 | if (dh->dh_fail_sparse) { |
d1d7e268 MK |
2650 | if (dh->dh_err == 0 && |
2651 | dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) | |
2e528b49 | 2652 | dh->dh_err = SET_ERROR(ENOENT); |
fc5bb51f BB |
2653 | if (dh->dh_err) { |
2654 | if (dh->dh_parent) | |
2655 | dbuf_rele(dh->dh_parent, NULL); | |
2656 | return (dh->dh_err); | |
34dc7c2f BB |
2657 | } |
2658 | } | |
fc5bb51f BB |
2659 | if (dh->dh_err && dh->dh_err != ENOENT) |
2660 | return (dh->dh_err); | |
2661 | dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
02730c33 | 2662 | dh->dh_parent, dh->dh_bp); |
34dc7c2f BB |
2663 | } |
2664 | ||
fcff0f35 PD |
2665 | if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) { |
2666 | mutex_exit(&dh->dh_db->db_mtx); | |
2667 | return (SET_ERROR(ENOENT)); | |
2668 | } | |
2669 | ||
d3c2ae1c | 2670 | if (dh->dh_db->db_buf != NULL) |
fc5bb51f | 2671 | ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data); |
34dc7c2f | 2672 | |
fc5bb51f | 2673 | ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf)); |
34dc7c2f BB |
2674 | |
2675 | /* | |
2676 | * If this buffer is currently syncing out, and we are |
2677 | * still referencing it from db_data, we need to make a copy | |
2678 | * of it in case we decide we want to dirty it again in this txg. | |
2679 | */ | |
fc5bb51f BB |
2680 | if (dh->dh_db->db_level == 0 && |
2681 | dh->dh_db->db_blkid != DMU_BONUS_BLKID && | |
2682 | dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT && | |
2683 | dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) { | |
2684 | dh->dh_dr = dh->dh_db->db_data_pending; | |
2685 | ||
2686 | if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) { | |
2687 | dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db); | |
2688 | ||
2689 | dbuf_set_data(dh->dh_db, | |
d3c2ae1c | 2690 | arc_alloc_buf(dh->dh_dn->dn_objset->os_spa, |
2aa34383 | 2691 | dh->dh_db, dh->dh_type, dh->dh_db->db.db_size)); |
fc5bb51f BB |
2692 | bcopy(dh->dh_dr->dt.dl.dr_data->b_data, |
2693 | dh->dh_db->db.db_data, dh->dh_db->db.db_size); | |
34dc7c2f BB |
2694 | } |
2695 | } | |
2696 | ||
d3c2ae1c GW |
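| /* |
| * A dbuf on the dbuf cache holds no references; now that we are |
| * taking a hold, remove it from the cache and return its size to |
| * the cache accounting. |
| */ |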
2697 | if (multilist_link_active(&dh->dh_db->db_cache_link)) { |
2698 | ASSERT(refcount_is_zero(&dh->dh_db->db_holds)); | |
64fc7762 | 2699 | multilist_remove(dbuf_cache, dh->dh_db); |
d3c2ae1c GW |
2700 | (void) refcount_remove_many(&dbuf_cache_size, |
2701 | dh->dh_db->db.db_size, dh->dh_db); | |
2702 | } | |
fc5bb51f | 2703 | (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag); |
fc5bb51f BB |
2704 | DBUF_VERIFY(dh->dh_db); |
2705 | mutex_exit(&dh->dh_db->db_mtx); | |
34dc7c2f BB |
2706 | |
2707 | /* NOTE: we can't rele the parent until after we drop the db_mtx */ | |
fc5bb51f BB |
2708 | if (dh->dh_parent) |
2709 | dbuf_rele(dh->dh_parent, NULL); | |
34dc7c2f | 2710 | |
fc5bb51f BB |
2711 | ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn); |
2712 | ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid); | |
2713 | ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level); | |
2714 | *(dh->dh_dbp) = dh->dh_db; | |
34dc7c2f BB |
2715 | |
2716 | return (0); | |
2717 | } | |
2718 | ||
fc5bb51f BB |
2719 | /* |
2720 | * The following code preserves the recursive function dbuf_hold_impl() | |
2721 | * but moves the local variables AND function arguments to the heap to | |
2722 | * minimize the stack frame size. Enough heap space for |
2723 | * DBUF_HOLD_IMPL_MAX_DEPTH (20) levels of recursion is allocated up front. |
2724 | */ | |
2725 | int | |
fcff0f35 PD |
2726 | dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, |
2727 | boolean_t fail_sparse, boolean_t fail_uncached, | |
fc5bb51f BB |
2728 | void *tag, dmu_buf_impl_t **dbp) |
2729 | { | |
2730 | struct dbuf_hold_impl_data *dh; | |
2731 | int error; | |
2732 | ||
d9eea113 | 2733 | dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) * |
79c76d5b | 2734 | DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP); |
fcff0f35 | 2735 | __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, |
02730c33 | 2736 | fail_uncached, tag, dbp, 0); |
fc5bb51f BB |
2737 | |
2738 | error = __dbuf_hold_impl(dh); | |
2739 | ||
d1d7e268 | 2740 | kmem_free(dh, sizeof (struct dbuf_hold_impl_data) * |
fc5bb51f BB |
2741 | DBUF_HOLD_IMPL_MAX_DEPTH); |
2742 | ||
2743 | return (error); | |
2744 | } | |
2745 | ||
2746 | static void | |
2747 | __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh, | |
fcff0f35 | 2748 | dnode_t *dn, uint8_t level, uint64_t blkid, |
4ea3f864 GM |
2749 | boolean_t fail_sparse, boolean_t fail_uncached, |
2750 | void *tag, dmu_buf_impl_t **dbp, int depth) | |
fc5bb51f BB |
2751 | { |
2752 | dh->dh_dn = dn; | |
2753 | dh->dh_level = level; | |
2754 | dh->dh_blkid = blkid; | |
fcff0f35 | 2755 | |
fc5bb51f | 2756 | dh->dh_fail_sparse = fail_sparse; |
fcff0f35 PD |
2757 | dh->dh_fail_uncached = fail_uncached; |
2758 | ||
fc5bb51f BB |
2759 | dh->dh_tag = tag; |
2760 | dh->dh_dbp = dbp; | |
d9eea113 MA |
2761 | |
2762 | dh->dh_db = NULL; | |
2763 | dh->dh_parent = NULL; | |
2764 | dh->dh_bp = NULL; | |
2765 | dh->dh_err = 0; | |
2766 | dh->dh_dr = NULL; | |
2767 | dh->dh_type = 0; | |
2768 | ||
fc5bb51f BB |
2769 | dh->dh_depth = depth; |
2770 | } | |
2771 | ||
34dc7c2f BB |
2772 | dmu_buf_impl_t * |
2773 | dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) | |
2774 | { | |
fcff0f35 | 2775 | return (dbuf_hold_level(dn, 0, blkid, tag)); |
34dc7c2f BB |
2776 | } |
2777 | ||
2778 | dmu_buf_impl_t * | |
2779 | dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) | |
2780 | { | |
2781 | dmu_buf_impl_t *db; | |
fcff0f35 | 2782 | int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); |
34dc7c2f BB |
2783 | return (err ? NULL : db); |
2784 | } | |
2785 | ||
2786 | void | |
2787 | dbuf_create_bonus(dnode_t *dn) | |
2788 | { | |
2789 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
2790 | ||
2791 | ASSERT(dn->dn_bonus == NULL); | |
428870ff BB |
2792 | dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); |
2793 | } | |
2794 | ||
2795 | int | |
2796 | dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) | |
2797 | { | |
2798 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
572e2857 BB |
2799 | dnode_t *dn; |
2800 | ||
428870ff | 2801 | if (db->db_blkid != DMU_SPILL_BLKID) |
2e528b49 | 2802 | return (SET_ERROR(ENOTSUP)); |
428870ff BB |
2803 | if (blksz == 0) |
2804 | blksz = SPA_MINBLOCKSIZE; | |
f1512ee6 MA |
2805 | ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); |
2806 | blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); | |
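	/*
	 * For example: with SPA_MINBLOCKSIZE of 512, a requested blksz of
	 * 1000 bytes is rounded up here to 1024 before the resize below.
	 */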
428870ff | 2807 | |
572e2857 BB |
2808 | DB_DNODE_ENTER(db); |
2809 | dn = DB_DNODE(db); | |
2810 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
428870ff | 2811 | dbuf_new_size(db, blksz, tx); |
572e2857 BB |
2812 | rw_exit(&dn->dn_struct_rwlock); |
2813 | DB_DNODE_EXIT(db); | |
428870ff BB |
2814 | |
2815 | return (0); | |
2816 | } | |
2817 | ||
2818 | void | |
2819 | dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) | |
2820 | { | |
2821 | dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); | |
34dc7c2f BB |
2822 | } |
2823 | ||
2824 | #pragma weak dmu_buf_add_ref = dbuf_add_ref | |
2825 | void | |
2826 | dbuf_add_ref(dmu_buf_impl_t *db, void *tag) | |
2827 | { | |
d3c2ae1c GW |
2828 | int64_t holds = refcount_add(&db->db_holds, tag); |
2829 | VERIFY3S(holds, >, 1); | |
34dc7c2f BB |
2830 | } |
2831 | ||
6ebebace JG |
2832 | #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref |
2833 | boolean_t | |
2834 | dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, | |
2835 | void *tag) | |
2836 | { | |
2837 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2838 | dmu_buf_impl_t *found_db; | |
2839 | boolean_t result = B_FALSE; | |
2840 | ||
d617648c | 2841 | if (blkid == DMU_BONUS_BLKID) |
6ebebace JG |
2842 | found_db = dbuf_find_bonus(os, obj); |
2843 | else | |
2844 | found_db = dbuf_find(os, obj, 0, blkid); | |
2845 | ||
2846 | if (found_db != NULL) { | |
2847 | if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { | |
2848 | (void) refcount_add(&db->db_holds, tag); | |
2849 | result = B_TRUE; | |
2850 | } | |
d617648c | 2851 | mutex_exit(&found_db->db_mtx); |
6ebebace JG |
2852 | } |
2853 | return (result); | |
2854 | } | |
2855 | ||
572e2857 BB |
2856 | /* |
2857 | * If you call dbuf_rele() you had better not be referencing the dnode handle | |
2858 | * unless you have some other direct or indirect hold on the dnode. (An indirect | |
2859 | * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) | |
2860 | * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the | |
2861 | * dnode's parent dbuf evicting its dnode handles. | |
2862 | */ | |
34dc7c2f BB |
2863 | void |
2864 | dbuf_rele(dmu_buf_impl_t *db, void *tag) | |
428870ff BB |
2865 | { |
2866 | mutex_enter(&db->db_mtx); | |
2867 | dbuf_rele_and_unlock(db, tag); | |
2868 | } | |
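/*
 * A minimal sketch (illustrative only) of the rule above: pin the dnode
 * handle with DB_DNODE_ENTER()/DB_DNODE_EXIT() for as long as "dn" is
 * dereferenced, and only then drop the dbuf hold.  object_of() is a
 * hypothetical name.
 */
#if 0
static uint64_t
object_of(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	uint64_t object;

	DB_DNODE_ENTER(db);	/* indirect hold keeps the handle alive */
	dn = DB_DNODE(db);
	object = dn->dn_object;
	DB_DNODE_EXIT(db);
	dbuf_rele(db, FTAG);	/* past this point "dn" must not be used */
	return (object);
}
#endif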
2869 | ||
b0bc7a84 MG |
2870 | void |
2871 | dmu_buf_rele(dmu_buf_t *db, void *tag) | |
2872 | { | |
2873 | dbuf_rele((dmu_buf_impl_t *)db, tag); | |
2874 | } | |
2875 | ||
428870ff BB |
2876 | /* |
2877 | * dbuf_rele() for an already-locked dbuf. This is necessary to allow | |
2878 | * db_dirtycnt and db_holds to be updated atomically. | |
2879 | */ | |
2880 | void | |
2881 | dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) | |
34dc7c2f BB |
2882 | { |
2883 | int64_t holds; | |
2884 | ||
428870ff | 2885 | ASSERT(MUTEX_HELD(&db->db_mtx)); |
34dc7c2f BB |
2886 | DBUF_VERIFY(db); |
2887 | ||
572e2857 BB |
2888 | /* |
2889 | * Remove the reference to the dbuf before removing its hold on the | |
2890 | * dnode so we can guarantee in dnode_move() that a referenced bonus | |
2891 | * buffer has a corresponding dnode hold. | |
2892 | */ | |
34dc7c2f BB |
2893 | holds = refcount_remove(&db->db_holds, tag); |
2894 | ASSERT(holds >= 0); | |
2895 | ||
2896 | /* | |
2897 | * We can't freeze indirects if there is a possibility that they | |
2898 | * may be modified in the current syncing context. | |
2899 | */ | |
d3c2ae1c GW |
2900 | if (db->db_buf != NULL && |
2901 | holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { | |
34dc7c2f | 2902 | arc_buf_freeze(db->db_buf); |
d3c2ae1c | 2903 | } |
34dc7c2f BB |
2904 | |
2905 | if (holds == db->db_dirtycnt && | |
bc4501f7 | 2906 | db->db_level == 0 && db->db_user_immediate_evict) |
34dc7c2f BB |
2907 | dbuf_evict_user(db); |
2908 | ||
2909 | if (holds == 0) { | |
428870ff | 2910 | if (db->db_blkid == DMU_BONUS_BLKID) { |
4c7b7eed | 2911 | dnode_t *dn; |
bc4501f7 | 2912 | boolean_t evict_dbuf = db->db_pending_evict; |
572e2857 BB |
2913 | |
2914 | /* | |
4c7b7eed JG |
2915 | * If the dnode moves here, we cannot cross this |
2916 | * barrier until the move completes. | |
572e2857 BB |
2917 | */ |
2918 | DB_DNODE_ENTER(db); | |
4c7b7eed JG |
2919 | |
2920 | dn = DB_DNODE(db); | |
2921 | atomic_dec_32(&dn->dn_dbufs_count); | |
2922 | ||
2923 | /* | |
2924 | * Decrementing the dbuf count means that the bonus | |
2925 | * buffer's dnode hold is no longer discounted in | |
2926 | * dnode_move(). The dnode cannot move until after | |
bc4501f7 | 2927 | * the dnode_rele() below. |
4c7b7eed | 2928 | */ |
572e2857 | 2929 | DB_DNODE_EXIT(db); |
4c7b7eed JG |
2930 | |
2931 | /* | |
2932 | * Do not reference db after its lock is dropped. | |
2933 | * Another thread may evict it. | |
2934 | */ | |
2935 | mutex_exit(&db->db_mtx); | |
2936 | ||
bc4501f7 | 2937 | if (evict_dbuf) |
4c7b7eed | 2938 | dnode_evict_bonus(dn); |
bc4501f7 JG |
2939 | |
2940 | dnode_rele(dn, db); | |
34dc7c2f BB |
2941 | } else if (db->db_buf == NULL) { |
2942 | /* | |
2943 | * This is a special case: we never associated this | |
2944 | * dbuf with any data allocated from the ARC. | |
2945 | */ | |
b128c09f BB |
2946 | ASSERT(db->db_state == DB_UNCACHED || |
2947 | db->db_state == DB_NOFILL); | |
d3c2ae1c | 2948 | dbuf_destroy(db); |
34dc7c2f | 2949 | } else if (arc_released(db->db_buf)) { |
34dc7c2f BB |
2950 | /* |
2951 | * This dbuf has anonymous data associated with it. | |
2952 | */ | |
d3c2ae1c | 2953 | dbuf_destroy(db); |
34dc7c2f | 2954 | } else { |
d3c2ae1c GW |
2955 | boolean_t do_arc_evict = B_FALSE; |
2956 | blkptr_t bp; | |
2957 | spa_t *spa = dmu_objset_spa(db->db_objset); | |
2958 | ||
2959 | if (!DBUF_IS_CACHEABLE(db) && | |
2960 | db->db_blkptr != NULL && | |
2961 | !BP_IS_HOLE(db->db_blkptr) && | |
2962 | !BP_IS_EMBEDDED(db->db_blkptr)) { | |
2963 | do_arc_evict = B_TRUE; | |
2964 | bp = *db->db_blkptr; | |
2965 | } | |
1eb5bfa3 | 2966 | |
d3c2ae1c GW |
2967 | if (!DBUF_IS_CACHEABLE(db) || |
2968 | db->db_pending_evict) { | |
2969 | dbuf_destroy(db); | |
2970 | } else if (!multilist_link_active(&db->db_cache_link)) { | |
64fc7762 | 2971 | multilist_insert(dbuf_cache, db); |
d3c2ae1c GW |
2972 | (void) refcount_add_many(&dbuf_cache_size, |
2973 | db->db.db_size, db); | |
b128c09f | 2974 | mutex_exit(&db->db_mtx); |
d3c2ae1c GW |
2975 | |
2976 | dbuf_evict_notify(); | |
bd089c54 | 2977 | } |
d3c2ae1c GW |
2978 | |
2979 | if (do_arc_evict) | |
2980 | arc_freed(spa, &bp); | |
34dc7c2f BB |
2981 | } |
2982 | } else { | |
2983 | mutex_exit(&db->db_mtx); | |
2984 | } | |
d3c2ae1c | 2985 | |
34dc7c2f BB |
2986 | } |
2987 | ||
2988 | #pragma weak dmu_buf_refcount = dbuf_refcount | |
2989 | uint64_t | |
2990 | dbuf_refcount(dmu_buf_impl_t *db) | |
2991 | { | |
2992 | return (refcount_count(&db->db_holds)); | |
2993 | } | |
2994 | ||
2995 | void * | |
0c66c32d JG |
2996 | dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, |
2997 | dmu_buf_user_t *new_user) | |
34dc7c2f | 2998 | { |
0c66c32d JG |
2999 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
3000 | ||
3001 | mutex_enter(&db->db_mtx); | |
3002 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3003 | if (db->db_user == old_user) | |
3004 | db->db_user = new_user; | |
3005 | else | |
3006 | old_user = db->db_user; | |
3007 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3008 | mutex_exit(&db->db_mtx); | |
3009 | ||
3010 | return (old_user); | |
34dc7c2f BB |
3011 | } |
3012 | ||
3013 | void * | |
0c66c32d | 3014 | dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f | 3015 | { |
0c66c32d | 3016 | return (dmu_buf_replace_user(db_fake, NULL, user)); |
34dc7c2f BB |
3017 | } |
3018 | ||
3019 | void * | |
0c66c32d | 3020 | dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f BB |
3021 | { |
3022 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 3023 | |
bc4501f7 | 3024 | db->db_user_immediate_evict = TRUE; |
0c66c32d JG |
3025 | return (dmu_buf_set_user(db_fake, user)); |
3026 | } | |
34dc7c2f | 3027 | |
0c66c32d JG |
3028 | void * |
3029 | dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
3030 | { | |
3031 | return (dmu_buf_replace_user(db_fake, user, NULL)); | |
34dc7c2f BB |
3032 | } |
3033 | ||
3034 | void * | |
3035 | dmu_buf_get_user(dmu_buf_t *db_fake) | |
3036 | { | |
3037 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 3038 | |
0c66c32d JG |
3039 | dbuf_verify_user(db, DBVU_NOT_EVICTING); |
3040 | return (db->db_user); | |
3041 | } | |
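/*
 * A minimal sketch (illustrative only) of the db_user interface above,
 * assuming the dmu_buf_init_user() variant in this tree that takes
 * sync and async eviction callbacks.  my_user_t, my_user_evict() and
 * my_user_attach() are hypothetical; the dmu_buf_user_t is embedded
 * first so the callback argument can be cast back.
 */
#if 0
typedef struct my_user {
	dmu_buf_user_t mu_dbu;	/* must be the first member */
	uint64_t mu_cookie;
} my_user_t;

static void
my_user_evict(void *arg)
{
	my_user_t *mu = arg;

	kmem_free(mu, sizeof (*mu));
}

static int
my_user_attach(dmu_buf_t *db)
{
	my_user_t *mu = kmem_zalloc(sizeof (*mu), KM_SLEEP);

	dmu_buf_init_user(&mu->mu_dbu, my_user_evict, NULL, NULL);
	if (dmu_buf_set_user(db, &mu->mu_dbu) != NULL) {
		/* lost the race: another user is already attached */
		kmem_free(mu, sizeof (*mu));
		return (SET_ERROR(EEXIST));
	}
	return (0);
}
#endif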
3042 | ||
3043 | void | |
3044 | dmu_buf_user_evict_wait() | |
3045 | { | |
3046 | taskq_wait(dbu_evict_taskq); | |
34dc7c2f BB |
3047 | } |
3048 | ||
03c6040b GW |
3049 | blkptr_t * |
3050 | dmu_buf_get_blkptr(dmu_buf_t *db) | |
3051 | { | |
3052 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3053 | return (dbi->db_blkptr); | |
3054 | } | |
3055 | ||
8bea9815 MA |
3056 | objset_t * |
3057 | dmu_buf_get_objset(dmu_buf_t *db) | |
3058 | { | |
3059 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3060 | return (dbi->db_objset); | |
3061 | } | |
3062 | ||
2bce8049 MA |
3063 | dnode_t * |
3064 | dmu_buf_dnode_enter(dmu_buf_t *db) | |
3065 | { | |
3066 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3067 | DB_DNODE_ENTER(dbi); | |
3068 | return (DB_DNODE(dbi)); | |
3069 | } | |
3070 | ||
3071 | void | |
3072 | dmu_buf_dnode_exit(dmu_buf_t *db) | |
3073 | { | |
3074 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3075 | DB_DNODE_EXIT(dbi); | |
3076 | } | |
3077 | ||
34dc7c2f BB |
3078 | static void |
3079 | dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) | |
3080 | { | |
3081 | /* ASSERT(dmu_tx_is_syncing(tx)) */
3082 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
3083 | ||
3084 | if (db->db_blkptr != NULL) | |
3085 | return; | |
3086 | ||
428870ff | 3087 | if (db->db_blkid == DMU_SPILL_BLKID) { |
50c957f7 | 3088 | db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
3089 | BP_ZERO(db->db_blkptr); |
3090 | return; | |
3091 | } | |
34dc7c2f BB |
3092 | if (db->db_level == dn->dn_phys->dn_nlevels-1) { |
3093 | /* | |
3094 | * This buffer was allocated at a time when there were
3095 | * no available blkptrs from the dnode, or it was
3096 | * inappropriate to hook it in (i.e., nlevels mismatch).
3097 | */ | |
3098 | ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); | |
3099 | ASSERT(db->db_parent == NULL); | |
3100 | db->db_parent = dn->dn_dbuf; | |
3101 | db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; | |
3102 | DBUF_VERIFY(db); | |
3103 | } else { | |
3104 | dmu_buf_impl_t *parent = db->db_parent; | |
3105 | int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3106 | ||
3107 | ASSERT(dn->dn_phys->dn_nlevels > 1); | |
3108 | if (parent == NULL) { | |
3109 | mutex_exit(&db->db_mtx); | |
3110 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
fcff0f35 PD |
3111 | parent = dbuf_hold_level(dn, db->db_level + 1, |
3112 | db->db_blkid >> epbs, db); | |
34dc7c2f BB |
3113 | rw_exit(&dn->dn_struct_rwlock); |
3114 | mutex_enter(&db->db_mtx); | |
3115 | db->db_parent = parent; | |
3116 | } | |
3117 | db->db_blkptr = (blkptr_t *)parent->db.db_data + | |
3118 | (db->db_blkid & ((1ULL << epbs) - 1)); | |
3119 | DBUF_VERIFY(db); | |
3120 | } | |
3121 | } | |
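/*
 * A worked example of the slot arithmetic above: with 128K indirect
 * blocks (dn_indblkshift of 17) and SPA_BLKPTRSHIFT of 7, epbs is 10,
 * so each indirect block holds 1024 block pointers.  A level-0 dbuf
 * with db_blkid 5000 then lives in slot 5000 & 1023 = 904 of the
 * level-1 indirect whose blkid is 5000 >> 10 = 4.
 */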
3122 | ||
d1d7e268 MK |
3123 | /* |
3124 | * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it | |
60948de1 BB |
3125 | * is critical that we not allow the compiler to inline this function into
3126 | * dbuf_sync_list(), thereby drastically bloating the stack usage.
3127 | */ | |
3128 | noinline static void | |
34dc7c2f BB |
3129 | dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
3130 | { | |
3131 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 | 3132 | dnode_t *dn; |
34dc7c2f BB |
3133 | zio_t *zio; |
3134 | ||
3135 | ASSERT(dmu_tx_is_syncing(tx)); | |
3136 | ||
3137 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3138 | ||
3139 | mutex_enter(&db->db_mtx); | |
3140 | ||
3141 | ASSERT(db->db_level > 0); | |
3142 | DBUF_VERIFY(db); | |
3143 | ||
e49f1e20 | 3144 | /* Read the block if it hasn't been read yet. */ |
34dc7c2f BB |
3145 | if (db->db_buf == NULL) { |
3146 | mutex_exit(&db->db_mtx); | |
3147 | (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); | |
3148 | mutex_enter(&db->db_mtx); | |
3149 | } | |
3150 | ASSERT3U(db->db_state, ==, DB_CACHED); | |
34dc7c2f BB |
3151 | ASSERT(db->db_buf != NULL); |
3152 | ||
572e2857 BB |
3153 | DB_DNODE_ENTER(db); |
3154 | dn = DB_DNODE(db); | |
e49f1e20 | 3155 | /* Indirect block size must match what the dnode thinks it is. */ |
572e2857 | 3156 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
34dc7c2f | 3157 | dbuf_check_blkptr(dn, db); |
572e2857 | 3158 | DB_DNODE_EXIT(db); |
34dc7c2f | 3159 | |
e49f1e20 | 3160 | /* Provide the pending dirty record to child dbufs */ |
34dc7c2f BB |
3161 | db->db_data_pending = dr; |
3162 | ||
34dc7c2f | 3163 | mutex_exit(&db->db_mtx); |
b128c09f | 3164 | dbuf_write(dr, db->db_buf, tx); |
34dc7c2f BB |
3165 | |
3166 | zio = dr->dr_zio; | |
3167 | mutex_enter(&dr->dt.di.dr_mtx); | |
4bda3bd0 | 3168 | dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); |
34dc7c2f BB |
3169 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
3170 | mutex_exit(&dr->dt.di.dr_mtx); | |
3171 | zio_nowait(zio); | |
3172 | } | |
3173 | ||
d1d7e268 MK |
3174 | /* |
3175 | * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is | |
60948de1 BB |
3176 | * critical that we not allow the compiler to inline this function into
3177 | * dbuf_sync_list(), thereby drastically bloating the stack usage.
3178 | */ | |
3179 | noinline static void | |
34dc7c2f BB |
3180 | dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
3181 | { | |
3182 | arc_buf_t **datap = &dr->dt.dl.dr_data; | |
3183 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 BB |
3184 | dnode_t *dn; |
3185 | objset_t *os; | |
34dc7c2f | 3186 | uint64_t txg = tx->tx_txg; |
34dc7c2f BB |
3187 | |
3188 | ASSERT(dmu_tx_is_syncing(tx)); | |
3189 | ||
3190 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3191 | ||
3192 | mutex_enter(&db->db_mtx); | |
3193 | /* | |
3194 | * To be synced, we must be dirtied. But we
3195 | * might have been freed after the dirty was recorded.
3196 | */ | |
3197 | if (db->db_state == DB_UNCACHED) { | |
3198 | /* This buffer has been freed since it was dirtied */ | |
3199 | ASSERT(db->db.db_data == NULL); | |
3200 | } else if (db->db_state == DB_FILL) { | |
3201 | /* This buffer was freed and is now being re-filled */ | |
3202 | ASSERT(db->db.db_data != dr->dt.dl.dr_data); | |
3203 | } else { | |
b128c09f | 3204 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); |
34dc7c2f BB |
3205 | } |
3206 | DBUF_VERIFY(db); | |
3207 | ||
572e2857 BB |
3208 | DB_DNODE_ENTER(db); |
3209 | dn = DB_DNODE(db); | |
3210 | ||
428870ff BB |
3211 | if (db->db_blkid == DMU_SPILL_BLKID) { |
3212 | mutex_enter(&dn->dn_mtx); | |
81edd3e8 P |
3213 | if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { |
3214 | /* | |
3215 | * In the previous transaction group, the bonus buffer | |
3216 | * was entirely used to store the attributes for the | |
3217 | * dnode which overrode the dn_spill field. However, | |
3218 | * when adding more attributes to the file a spill | |
3219 | * block was required to hold the extra attributes. | |
3220 | * | |
3221 | * Make sure to clear the garbage left in the dn_spill | |
3222 | * field from the previous attributes in the bonus | |
3223 | * buffer. Otherwise, after writing out the spill | |
3224 | * block to the new allocated dva, it will free | |
3225 | * the old block pointed to by the invalid dn_spill. | |
3226 | */ | |
3227 | db->db_blkptr = NULL; | |
3228 | } | |
428870ff BB |
3229 | dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; |
3230 | mutex_exit(&dn->dn_mtx); | |
3231 | } | |
3232 | ||
34dc7c2f BB |
3233 | /* |
3234 | * If this is a bonus buffer, simply copy the bonus data into the | |
3235 | * dnode. It will be written out when the dnode is synced (and it | |
3236 | * will be synced, since it must have been dirty for dbuf_sync to | |
3237 | * be called). | |
3238 | */ | |
428870ff | 3239 | if (db->db_blkid == DMU_BONUS_BLKID) { |
34dc7c2f BB |
3240 | dbuf_dirty_record_t **drp; |
3241 | ||
3242 | ASSERT(*datap != NULL); | |
c99c9001 | 3243 | ASSERT0(db->db_level); |
50c957f7 NB |
3244 | ASSERT3U(dn->dn_phys->dn_bonuslen, <=, |
3245 | DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); | |
34dc7c2f | 3246 | bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); |
572e2857 BB |
3247 | DB_DNODE_EXIT(db); |
3248 | ||
34dc7c2f | 3249 | if (*datap != db->db.db_data) { |
50c957f7 NB |
3250 | int slots = DB_DNODE(db)->dn_num_slots; |
3251 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
a3fd9d9e | 3252 | kmem_free(*datap, bonuslen); |
25458cbe | 3253 | arc_space_return(bonuslen, ARC_SPACE_BONUS); |
34dc7c2f BB |
3254 | } |
3255 | db->db_data_pending = NULL; | |
3256 | drp = &db->db_last_dirty; | |
3257 | while (*drp != dr) | |
3258 | drp = &(*drp)->dr_next; | |
3259 | ASSERT(dr->dr_next == NULL); | |
428870ff | 3260 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f | 3261 | *drp = dr->dr_next; |
753972fc BB |
3262 | if (dr->dr_dbuf->db_level != 0) { |
3263 | mutex_destroy(&dr->dt.di.dr_mtx); | |
3264 | list_destroy(&dr->dt.di.dr_children); | |
3265 | } | |
34dc7c2f BB |
3266 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); |
3267 | ASSERT(db->db_dirtycnt > 0); | |
3268 | db->db_dirtycnt -= 1; | |
428870ff | 3269 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); |
34dc7c2f BB |
3270 | return; |
3271 | } | |
3272 | ||
572e2857 BB |
3273 | os = dn->dn_objset; |
3274 | ||
34dc7c2f BB |
3275 | /* |
3276 | * This function may have dropped the db_mtx lock allowing a dmu_sync | |
3277 | * operation to sneak in. As a result, we need to ensure that we | |
3278 | * don't check the dr_override_state until we have returned from | |
3279 | * dbuf_check_blkptr. | |
3280 | */ | |
3281 | dbuf_check_blkptr(dn, db); | |
3282 | ||
3283 | /* | |
572e2857 | 3284 | * If this buffer is in the middle of an immediate write, |
34dc7c2f BB |
3285 | * wait for the synchronous IO to complete. |
3286 | */ | |
3287 | while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { | |
3288 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); | |
3289 | cv_wait(&db->db_changed, &db->db_mtx); | |
3290 | ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); | |
3291 | } | |
3292 | ||
9babb374 BB |
3293 | if (db->db_state != DB_NOFILL && |
3294 | dn->dn_object != DMU_META_DNODE_OBJECT && | |
3295 | refcount_count(&db->db_holds) > 1 && | |
428870ff | 3296 | dr->dt.dl.dr_override_state != DR_OVERRIDDEN && |
9babb374 BB |
3297 | *datap == db->db_buf) { |
3298 | /* | |
3299 | * If this buffer is currently "in use" (i.e., there | |
3300 | * are active holds and db_data still references it), | |
3301 | * then make a copy before we start the write so that | |
3302 | * any modifications from the open txg will not leak | |
3303 | * into this write. | |
3304 | * | |
3305 | * NOTE: this copy does not need to be made for | |
3306 | * objects only modified in the syncing context (e.g. | |
3307 | * blocks of the meta-dnode, DMU_META_DNODE_OBJECT).
3308 | */ | |
2aa34383 | 3309 | int psize = arc_buf_size(*datap); |
9babb374 | 3310 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); |
2aa34383 DK |
3311 | enum zio_compress compress_type = arc_get_compression(*datap); |
3312 | ||
3313 | if (compress_type == ZIO_COMPRESS_OFF) { | |
3314 | *datap = arc_alloc_buf(os->os_spa, db, type, psize); | |
3315 | } else { | |
2aa34383 | 3316 | ASSERT3U(type, ==, ARC_BUFC_DATA); |
a7004725 | 3317 | int lsize = arc_buf_lsize(*datap); |
2aa34383 DK |
3318 | *datap = arc_alloc_compressed_buf(os->os_spa, db, |
3319 | psize, lsize, compress_type); | |
3320 | } | |
3321 | bcopy(db->db.db_data, (*datap)->b_data, psize); | |
b128c09f | 3322 | } |
34dc7c2f BB |
3323 | db->db_data_pending = dr; |
3324 | ||
3325 | mutex_exit(&db->db_mtx); | |
3326 | ||
b128c09f | 3327 | dbuf_write(dr, *datap, tx); |
34dc7c2f BB |
3328 | |
3329 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
572e2857 | 3330 | if (dn->dn_object == DMU_META_DNODE_OBJECT) { |
34dc7c2f | 3331 | list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); |
572e2857 BB |
3332 | DB_DNODE_EXIT(db); |
3333 | } else { | |
3334 | /* | |
3335 | * Although zio_nowait() does not "wait for an IO", it does | |
3336 | * initiate the IO. If this is an empty write it seems plausible | |
3337 | * that the IO could actually be completed before the nowait | |
3338 | * returns. We need to DB_DNODE_EXIT() first in case | |
3339 | * zio_nowait() invalidates the dbuf. | |
3340 | */ | |
3341 | DB_DNODE_EXIT(db); | |
34dc7c2f | 3342 | zio_nowait(dr->dr_zio); |
572e2857 | 3343 | } |
34dc7c2f BB |
3344 | } |
3345 | ||
3346 | void | |
4bda3bd0 | 3347 | dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) |
34dc7c2f BB |
3348 | { |
3349 | dbuf_dirty_record_t *dr; | |
3350 | ||
c65aa5b2 | 3351 | while ((dr = list_head(list))) { |
34dc7c2f BB |
3352 | if (dr->dr_zio != NULL) { |
3353 | /* | |
3354 | * If we find an already initialized zio then we | |
3355 | * are processing the meta-dnode, and we have finished. | |
3356 | * The dbufs for all dnodes are put back on the list | |
3357 | * during processing, so that we can zio_wait() | |
3358 | * these IOs after initiating all child IOs. | |
3359 | */ | |
3360 | ASSERT3U(dr->dr_dbuf->db.db_object, ==, | |
3361 | DMU_META_DNODE_OBJECT); | |
3362 | break; | |
3363 | } | |
4bda3bd0 MA |
3364 | if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && |
3365 | dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { | |
3366 | VERIFY3U(dr->dr_dbuf->db_level, ==, level); | |
3367 | } | |
34dc7c2f BB |
3368 | list_remove(list, dr); |
3369 | if (dr->dr_dbuf->db_level > 0) | |
3370 | dbuf_sync_indirect(dr, tx); | |
3371 | else | |
3372 | dbuf_sync_leaf(dr, tx); | |
3373 | } | |
3374 | } | |
3375 | ||
34dc7c2f BB |
3376 | /* ARGSUSED */ |
3377 | static void | |
3378 | dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3379 | { | |
3380 | dmu_buf_impl_t *db = vdb; | |
572e2857 | 3381 | dnode_t *dn; |
b128c09f | 3382 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 3383 | blkptr_t *bp_orig = &zio->io_bp_orig; |
428870ff BB |
3384 | spa_t *spa = zio->io_spa; |
3385 | int64_t delta; | |
34dc7c2f | 3386 | uint64_t fill = 0; |
428870ff | 3387 | int i; |
34dc7c2f | 3388 | |
463a8cfe AR |
3389 | ASSERT3P(db->db_blkptr, !=, NULL); |
3390 | ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); | |
b128c09f | 3391 | |
572e2857 BB |
3392 | DB_DNODE_ENTER(db); |
3393 | dn = DB_DNODE(db); | |
428870ff BB |
3394 | delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); |
3395 | dnode_diduse_space(dn, delta - zio->io_prev_space_delta); | |
3396 | zio->io_prev_space_delta = delta; | |
34dc7c2f | 3397 | |
b0bc7a84 MG |
3398 | if (bp->blk_birth != 0) { |
3399 | ASSERT((db->db_blkid != DMU_SPILL_BLKID && | |
3400 | BP_GET_TYPE(bp) == dn->dn_type) || | |
3401 | (db->db_blkid == DMU_SPILL_BLKID && | |
9b67f605 MA |
3402 | BP_GET_TYPE(bp) == dn->dn_bonustype) || |
3403 | BP_IS_EMBEDDED(bp)); | |
b0bc7a84 | 3404 | ASSERT(BP_GET_LEVEL(bp) == db->db_level); |
34dc7c2f BB |
3405 | } |
3406 | ||
3407 | mutex_enter(&db->db_mtx); | |
3408 | ||
428870ff BB |
3409 | #ifdef ZFS_DEBUG |
3410 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
428870ff | 3411 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
463a8cfe | 3412 | ASSERT(!(BP_IS_HOLE(bp)) && |
50c957f7 | 3413 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
428870ff BB |
3414 | } |
3415 | #endif | |
3416 | ||
34dc7c2f BB |
3417 | if (db->db_level == 0) { |
3418 | mutex_enter(&dn->dn_mtx); | |
428870ff BB |
3419 | if (db->db_blkid > dn->dn_phys->dn_maxblkid && |
3420 | db->db_blkid != DMU_SPILL_BLKID) | |
34dc7c2f BB |
3421 | dn->dn_phys->dn_maxblkid = db->db_blkid; |
3422 | mutex_exit(&dn->dn_mtx); | |
3423 | ||
3424 | if (dn->dn_type == DMU_OT_DNODE) { | |
50c957f7 NB |
3425 | i = 0; |
3426 | while (i < db->db.db_size) { | |
817b1b6e MA |
3427 | dnode_phys_t *dnp = |
3428 | (void *)(((char *)db->db.db_data) + i); | |
50c957f7 NB |
3429 | |
3430 | i += DNODE_MIN_SIZE; | |
3431 | if (dnp->dn_type != DMU_OT_NONE) { | |
34dc7c2f | 3432 | fill++; |
50c957f7 NB |
3433 | i += dnp->dn_extra_slots * |
3434 | DNODE_MIN_SIZE; | |
3435 | } | |
34dc7c2f BB |
3436 | } |
3437 | } else { | |
b0bc7a84 MG |
3438 | if (BP_IS_HOLE(bp)) { |
3439 | fill = 0; | |
3440 | } else { | |
3441 | fill = 1; | |
3442 | } | |
34dc7c2f BB |
3443 | } |
3444 | } else { | |
b128c09f | 3445 | blkptr_t *ibp = db->db.db_data; |
34dc7c2f | 3446 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
b128c09f BB |
3447 | for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { |
3448 | if (BP_IS_HOLE(ibp)) | |
34dc7c2f | 3449 | continue; |
9b67f605 | 3450 | fill += BP_GET_FILL(ibp); |
34dc7c2f BB |
3451 | } |
3452 | } | |
572e2857 | 3453 | DB_DNODE_EXIT(db); |
34dc7c2f | 3454 | |
9b67f605 MA |
3455 | if (!BP_IS_EMBEDDED(bp)) |
3456 | bp->blk_fill = fill; | |
34dc7c2f BB |
3457 | |
3458 | mutex_exit(&db->db_mtx); | |
463a8cfe AR |
3459 | |
3460 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
3461 | *db->db_blkptr = *bp; | |
3462 | rw_exit(&dn->dn_struct_rwlock); | |
34dc7c2f BB |
3463 | } |
3464 | ||
bc77ba73 PD |
3465 | /* ARGSUSED */ |
3466 | /* | |
3467 | * This function gets called just prior to running through the compression | |
3468 | * stage of the zio pipeline. If we're an indirect block comprised of only
3469 | * holes, then we want this indirect to be compressed away to a hole. In
3470 | * order to do that we must zero out any information about the holes that
3471 | * this indirect points to before we try to compress it.
3472 | */ | |
3473 | static void | |
3474 | dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3475 | { | |
3476 | dmu_buf_impl_t *db = vdb; | |
3477 | dnode_t *dn; | |
3478 | blkptr_t *bp; | |
721ed0ee | 3479 | unsigned int epbs, i; |
bc77ba73 PD |
3480 | |
3481 | ASSERT3U(db->db_level, >, 0); | |
3482 | DB_DNODE_ENTER(db); | |
3483 | dn = DB_DNODE(db); | |
3484 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
721ed0ee | 3485 | ASSERT3U(epbs, <, 31); |
bc77ba73 PD |
3486 | |
3487 | /* Determine if all our children are holes */ | |
3f93077b | 3488 | for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { |
bc77ba73 PD |
3489 | if (!BP_IS_HOLE(bp)) |
3490 | break; | |
3491 | } | |
3492 | ||
3493 | /* | |
3494 | * If all the children are holes, then zero them all out so that | |
3495 | * we may get compressed away. | |
3496 | */ | |
3f93077b | 3497 | if (i == 1ULL << epbs) { |
721ed0ee GM |
3498 | /* |
3499 | * We only found holes. Grab the rwlock to prevent | |
3500 | * anybody from reading the blocks we're about to | |
3501 | * zero out. | |
3502 | */ | |
3503 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
bc77ba73 | 3504 | bzero(db->db.db_data, db->db.db_size); |
721ed0ee | 3505 | rw_exit(&dn->dn_struct_rwlock); |
bc77ba73 PD |
3506 | } |
3507 | DB_DNODE_EXIT(db); | |
3508 | } | |
3509 | ||
e8b96c60 MA |
3510 | /* |
3511 | * The SPA will call this callback several times for each zio - once | |
3512 | * for every physical child i/o (zio->io_phys_children times). This | |
3513 | * allows the DMU to monitor the progress of each logical i/o. For example, | |
3514 | * there may be 2 copies of an indirect block, or many fragments of a RAID-Z | |
3515 | * block. There may be a long delay before all copies/fragments are completed, | |
3516 | * so this callback allows us to retire dirty space gradually, as the physical | |
3517 | * i/os complete. | |
3518 | */ | |
3519 | /* ARGSUSED */ | |
3520 | static void | |
3521 | dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) | |
3522 | { | |
3523 | dmu_buf_impl_t *db = arg; | |
3524 | objset_t *os = db->db_objset; | |
3525 | dsl_pool_t *dp = dmu_objset_pool(os); | |
3526 | dbuf_dirty_record_t *dr; | |
3527 | int delta = 0; | |
3528 | ||
3529 | dr = db->db_data_pending; | |
3530 | ASSERT3U(dr->dr_txg, ==, zio->io_txg); | |
3531 | ||
3532 | /* | |
3533 | * The callback will be called io_phys_children times. Retire one | |
3534 | * portion of our dirty space each time we are called. Any rounding | |
3535 | * error will be cleaned up by dsl_pool_sync()'s call to | |
3536 | * dsl_pool_undirty_space(). | |
3537 | */ | |
3538 | delta = dr->dr_accounted / zio->io_phys_children; | |
3539 | dsl_pool_undirty_space(dp, delta, zio->io_txg); | |
3540 | } | |
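/*
 * A worked example of the accounting above: with dr_accounted of
 * 1048576 bytes and 3 physical children (e.g. three copies of a
 * metadata block), each call retires 1048576 / 3 = 349525 bytes, and
 * dsl_pool_sync() later cleans up the 1-byte rounding remainder via
 * dsl_pool_undirty_space().
 */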
3541 | ||
34dc7c2f BB |
3542 | /* ARGSUSED */ |
3543 | static void | |
3544 | dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3545 | { | |
3546 | dmu_buf_impl_t *db = vdb; | |
428870ff | 3547 | blkptr_t *bp_orig = &zio->io_bp_orig; |
b0bc7a84 MG |
3548 | blkptr_t *bp = db->db_blkptr; |
3549 | objset_t *os = db->db_objset; | |
3550 | dmu_tx_t *tx = os->os_synctx; | |
34dc7c2f BB |
3551 | dbuf_dirty_record_t **drp, *dr; |
3552 | ||
c99c9001 | 3553 | ASSERT0(zio->io_error); |
428870ff BB |
3554 | ASSERT(db->db_blkptr == bp); |
3555 | ||
03c6040b GW |
3556 | /* |
3557 | * For nopwrites and rewrites we ensure that the bp matches our | |
3558 | * original and bypass all the accounting. | |
3559 | */ | |
3560 | if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { | |
428870ff BB |
3561 | ASSERT(BP_EQUAL(bp, bp_orig)); |
3562 | } else { | |
b0bc7a84 | 3563 | dsl_dataset_t *ds = os->os_dsl_dataset; |
428870ff BB |
3564 | (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); |
3565 | dsl_dataset_block_born(ds, bp, tx); | |
3566 | } | |
34dc7c2f BB |
3567 | |
3568 | mutex_enter(&db->db_mtx); | |
3569 | ||
428870ff BB |
3570 | DBUF_VERIFY(db); |
3571 | ||
34dc7c2f BB |
3572 | drp = &db->db_last_dirty; |
3573 | while ((dr = *drp) != db->db_data_pending) | |
3574 | drp = &dr->dr_next; | |
3575 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
428870ff | 3576 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f BB |
3577 | ASSERT(dr->dr_next == NULL); |
3578 | *drp = dr->dr_next; | |
3579 | ||
428870ff BB |
3580 | #ifdef ZFS_DEBUG |
3581 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
572e2857 BB |
3582 | dnode_t *dn; |
3583 | ||
3584 | DB_DNODE_ENTER(db); | |
3585 | dn = DB_DNODE(db); | |
428870ff BB |
3586 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
3587 | ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && | |
50c957f7 | 3588 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
572e2857 | 3589 | DB_DNODE_EXIT(db); |
428870ff BB |
3590 | } |
3591 | #endif | |
3592 | ||
34dc7c2f | 3593 | if (db->db_level == 0) { |
428870ff | 3594 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 3595 | ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); |
b128c09f BB |
3596 | if (db->db_state != DB_NOFILL) { |
3597 | if (dr->dt.dl.dr_data != db->db_buf) | |
d3c2ae1c | 3598 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
b128c09f | 3599 | } |
34dc7c2f | 3600 | } else { |
572e2857 BB |
3601 | dnode_t *dn; |
3602 | ||
3603 | DB_DNODE_ENTER(db); | |
3604 | dn = DB_DNODE(db); | |
34dc7c2f | 3605 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
b0bc7a84 | 3606 | ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); |
34dc7c2f | 3607 | if (!BP_IS_HOLE(db->db_blkptr)) { |
1fde1e37 BB |
3608 | ASSERTV(int epbs = dn->dn_phys->dn_indblkshift - |
3609 | SPA_BLKPTRSHIFT); | |
b0bc7a84 MG |
3610 | ASSERT3U(db->db_blkid, <=, |
3611 | dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); | |
34dc7c2f BB |
3612 | ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, |
3613 | db->db.db_size); | |
34dc7c2f | 3614 | } |
572e2857 | 3615 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
3616 | mutex_destroy(&dr->dt.di.dr_mtx); |
3617 | list_destroy(&dr->dt.di.dr_children); | |
3618 | } | |
3619 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
3620 | ||
3621 | cv_broadcast(&db->db_changed); | |
3622 | ASSERT(db->db_dirtycnt > 0); | |
3623 | db->db_dirtycnt -= 1; | |
3624 | db->db_data_pending = NULL; | |
b0bc7a84 | 3625 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); |
428870ff BB |
3626 | } |
3627 | ||
3628 | static void | |
3629 | dbuf_write_nofill_ready(zio_t *zio) | |
3630 | { | |
3631 | dbuf_write_ready(zio, NULL, zio->io_private); | |
3632 | } | |
3633 | ||
3634 | static void | |
3635 | dbuf_write_nofill_done(zio_t *zio) | |
3636 | { | |
3637 | dbuf_write_done(zio, NULL, zio->io_private); | |
3638 | } | |
3639 | ||
3640 | static void | |
3641 | dbuf_write_override_ready(zio_t *zio) | |
3642 | { | |
3643 | dbuf_dirty_record_t *dr = zio->io_private; | |
3644 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3645 | ||
3646 | dbuf_write_ready(zio, NULL, db); | |
3647 | } | |
3648 | ||
3649 | static void | |
3650 | dbuf_write_override_done(zio_t *zio) | |
3651 | { | |
3652 | dbuf_dirty_record_t *dr = zio->io_private; | |
3653 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3654 | blkptr_t *obp = &dr->dt.dl.dr_overridden_by; | |
3655 | ||
3656 | mutex_enter(&db->db_mtx); | |
3657 | if (!BP_EQUAL(zio->io_bp, obp)) { | |
3658 | if (!BP_IS_HOLE(obp)) | |
3659 | dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); | |
3660 | arc_release(dr->dt.dl.dr_data, db); | |
3661 | } | |
34dc7c2f BB |
3662 | mutex_exit(&db->db_mtx); |
3663 | ||
428870ff | 3664 | dbuf_write_done(zio, NULL, db); |
a6255b7f DQ |
3665 | |
3666 | if (zio->io_abd != NULL) | |
3667 | abd_put(zio->io_abd); | |
428870ff BB |
3668 | } |
3669 | ||
e49f1e20 | 3670 | /* Issue I/O to commit a dirty buffer to disk. */ |
428870ff BB |
3671 | static void |
3672 | dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) | |
3673 | { | |
3674 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 BB |
3675 | dnode_t *dn; |
3676 | objset_t *os; | |
428870ff BB |
3677 | dmu_buf_impl_t *parent = db->db_parent; |
3678 | uint64_t txg = tx->tx_txg; | |
5dbd68a3 | 3679 | zbookmark_phys_t zb; |
428870ff BB |
3680 | zio_prop_t zp; |
3681 | zio_t *zio; | |
3682 | int wp_flag = 0; | |
34dc7c2f | 3683 | |
463a8cfe AR |
3684 | ASSERT(dmu_tx_is_syncing(tx)); |
3685 | ||
572e2857 BB |
3686 | DB_DNODE_ENTER(db); |
3687 | dn = DB_DNODE(db); | |
3688 | os = dn->dn_objset; | |
3689 | ||
428870ff BB |
3690 | if (db->db_state != DB_NOFILL) { |
3691 | if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { | |
3692 | /* | |
3693 | * Private object buffers are released here rather | |
3694 | * than in dbuf_dirty() since they are only modified | |
3695 | * in the syncing context and we don't want the | |
3696 | * overhead of making multiple copies of the data. | |
3697 | */ | |
3698 | if (BP_IS_HOLE(db->db_blkptr)) { | |
3699 | arc_buf_thaw(data); | |
3700 | } else { | |
3701 | dbuf_release_bp(db); | |
3702 | } | |
3703 | } | |
3704 | } | |
3705 | ||
3706 | if (parent != dn->dn_dbuf) { | |
e49f1e20 WA |
3707 | /* Our parent is an indirect block. */ |
3708 | /* We have a dirty parent that has been scheduled for write. */ | |
428870ff | 3709 | ASSERT(parent && parent->db_data_pending); |
e49f1e20 | 3710 | /* Our parent's buffer is one level closer to the dnode. */ |
428870ff | 3711 | ASSERT(db->db_level == parent->db_level-1); |
e49f1e20 WA |
3712 | /* |
3713 | * We're about to modify our parent's db_data by modifying | |
3714 | * our block pointer, so the parent must be released. | |
3715 | */ | |
428870ff BB |
3716 | ASSERT(arc_released(parent->db_buf)); |
3717 | zio = parent->db_data_pending->dr_zio; | |
3718 | } else { | |
e49f1e20 | 3719 | /* Our parent is the dnode itself. */ |
428870ff BB |
3720 | ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && |
3721 | db->db_blkid != DMU_SPILL_BLKID) || | |
3722 | (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); | |
3723 | if (db->db_blkid != DMU_SPILL_BLKID) | |
3724 | ASSERT3P(db->db_blkptr, ==, | |
3725 | &dn->dn_phys->dn_blkptr[db->db_blkid]); | |
3726 | zio = dn->dn_zio; | |
3727 | } | |
3728 | ||
3729 | ASSERT(db->db_level == 0 || data == db->db_buf); | |
3730 | ASSERT3U(db->db_blkptr->blk_birth, <=, txg); | |
3731 | ASSERT(zio); | |
3732 | ||
3733 | SET_BOOKMARK(&zb, os->os_dsl_dataset ? | |
3734 | os->os_dsl_dataset->ds_object : DMU_META_OBJSET, | |
3735 | db->db.db_object, db->db_level, db->db_blkid); | |
3736 | ||
3737 | if (db->db_blkid == DMU_SPILL_BLKID) | |
3738 | wp_flag = WP_SPILL; | |
3739 | wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; | |
3740 | ||
82644107 | 3741 | dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); |
572e2857 | 3742 | DB_DNODE_EXIT(db); |
428870ff | 3743 | |
463a8cfe AR |
3744 | /* |
3745 | * We copy the blkptr now (rather than when we instantiate the dirty | |
3746 | * record), because its value can change between open context and | |
3747 | * syncing context. We do not need to hold dn_struct_rwlock to read | |
3748 | * db_blkptr because we are in syncing context. | |
3749 | */ | |
3750 | dr->dr_bp_copy = *db->db_blkptr; | |
3751 | ||
9b67f605 MA |
3752 | if (db->db_level == 0 && |
3753 | dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { | |
3754 | /* | |
3755 | * The BP for this block has been provided by open context | |
3756 | * (by dmu_sync() or dmu_buf_write_embedded()). | |
3757 | */ | |
a6255b7f DQ |
3758 | abd_t *contents = (data != NULL) ? |
3759 | abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; | |
9b67f605 | 3760 | |
428870ff | 3761 | dr->dr_zio = zio_write(zio, os->os_spa, txg, |
2aa34383 DK |
3762 | &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size, |
3763 | &zp, dbuf_write_override_ready, NULL, NULL, | |
bc77ba73 | 3764 | dbuf_write_override_done, |
e8b96c60 | 3765 | dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); |
428870ff BB |
3766 | mutex_enter(&db->db_mtx); |
3767 | dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; | |
3768 | zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, | |
03c6040b | 3769 | dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); |
428870ff BB |
3770 | mutex_exit(&db->db_mtx); |
3771 | } else if (db->db_state == DB_NOFILL) { | |
3c67d83a TH |
3772 | ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || |
3773 | zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); | |
428870ff | 3774 | dr->dr_zio = zio_write(zio, os->os_spa, txg, |
2aa34383 | 3775 | &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, |
bc77ba73 PD |
3776 | dbuf_write_nofill_ready, NULL, NULL, |
3777 | dbuf_write_nofill_done, db, | |
428870ff BB |
3778 | ZIO_PRIORITY_ASYNC_WRITE, |
3779 | ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); | |
3780 | } else { | |
bc77ba73 | 3781 | arc_done_func_t *children_ready_cb = NULL; |
428870ff | 3782 | ASSERT(arc_released(data)); |
bc77ba73 PD |
3783 | |
3784 | /* | |
3785 | * For indirect blocks, we want to setup the children | |
3786 | * ready callback so that we can properly handle an indirect | |
3787 | * block that only contains holes. | |
3788 | */ | |
3789 | if (db->db_level != 0) | |
3790 | children_ready_cb = dbuf_write_children_ready; | |
3791 | ||
428870ff | 3792 | dr->dr_zio = arc_write(zio, os->os_spa, txg, |
463a8cfe | 3793 | &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), |
d3c2ae1c GW |
3794 | &zp, dbuf_write_ready, |
3795 | children_ready_cb, dbuf_write_physdone, | |
3796 | dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, | |
3797 | ZIO_FLAG_MUSTSUCCEED, &zb); | |
428870ff | 3798 | } |
34dc7c2f | 3799 | } |
c28b2279 BB |
3800 | |
3801 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
8f576c23 BB |
3802 | EXPORT_SYMBOL(dbuf_find); |
3803 | EXPORT_SYMBOL(dbuf_is_metadata); | |
d3c2ae1c | 3804 | EXPORT_SYMBOL(dbuf_destroy); |
8f576c23 BB |
3805 | EXPORT_SYMBOL(dbuf_loan_arcbuf); |
3806 | EXPORT_SYMBOL(dbuf_whichblock); | |
3807 | EXPORT_SYMBOL(dbuf_read); | |
3808 | EXPORT_SYMBOL(dbuf_unoverride); | |
3809 | EXPORT_SYMBOL(dbuf_free_range); | |
3810 | EXPORT_SYMBOL(dbuf_new_size); | |
3811 | EXPORT_SYMBOL(dbuf_release_bp); | |
3812 | EXPORT_SYMBOL(dbuf_dirty); | |
c28b2279 | 3813 | EXPORT_SYMBOL(dmu_buf_will_dirty); |
8f576c23 BB |
3814 | EXPORT_SYMBOL(dmu_buf_will_not_fill); |
3815 | EXPORT_SYMBOL(dmu_buf_will_fill); | |
3816 | EXPORT_SYMBOL(dmu_buf_fill_done); | |
4047414a | 3817 | EXPORT_SYMBOL(dmu_buf_rele); |
8f576c23 | 3818 | EXPORT_SYMBOL(dbuf_assign_arcbuf); |
8f576c23 BB |
3819 | EXPORT_SYMBOL(dbuf_prefetch); |
3820 | EXPORT_SYMBOL(dbuf_hold_impl); | |
3821 | EXPORT_SYMBOL(dbuf_hold); | |
3822 | EXPORT_SYMBOL(dbuf_hold_level); | |
3823 | EXPORT_SYMBOL(dbuf_create_bonus); | |
3824 | EXPORT_SYMBOL(dbuf_spill_set_blksz); | |
3825 | EXPORT_SYMBOL(dbuf_rm_spill); | |
3826 | EXPORT_SYMBOL(dbuf_add_ref); | |
3827 | EXPORT_SYMBOL(dbuf_rele); | |
3828 | EXPORT_SYMBOL(dbuf_rele_and_unlock); | |
3829 | EXPORT_SYMBOL(dbuf_refcount); | |
3830 | EXPORT_SYMBOL(dbuf_sync_list); | |
3831 | EXPORT_SYMBOL(dmu_buf_set_user); | |
3832 | EXPORT_SYMBOL(dmu_buf_set_user_ie); | |
8f576c23 | 3833 | EXPORT_SYMBOL(dmu_buf_get_user); |
0f699108 | 3834 | EXPORT_SYMBOL(dmu_buf_get_blkptr); |
d3c2ae1c | 3835 | |
02730c33 | 3836 | /* BEGIN CSTYLED */ |
d3c2ae1c GW |
3837 | module_param(dbuf_cache_max_bytes, ulong, 0644); |
3838 | MODULE_PARM_DESC(dbuf_cache_max_bytes, | |
02730c33 | 3839 | "Maximum size in bytes of the dbuf cache."); |
d3c2ae1c GW |
3840 | |
3841 | module_param(dbuf_cache_hiwater_pct, uint, 0644); | |
3842 | MODULE_PARM_DESC(dbuf_cache_hiwater_pct, | |
f974e25d | 3843 | "Percentage over dbuf_cache_max_bytes when dbufs must be evicted " |
3844 | "directly."); | |
d3c2ae1c GW |
3845 | |
3846 | module_param(dbuf_cache_lowater_pct, uint, 0644); | |
3847 | MODULE_PARM_DESC(dbuf_cache_lowater_pct, | |
f974e25d | 3848 | "Percentage below dbuf_cache_max_bytes when the evict thread stops " |
3849 | "evicting dbufs."); | |
d3c2ae1c GW |
3850 | |
3851 | module_param(dbuf_cache_max_shift, int, 0644); | |
3852 | MODULE_PARM_DESC(dbuf_cache_max_shift, | |
f974e25d | 3853 | "Cap the size of the dbuf cache to a log2 fraction of ARC size.");
02730c33 | 3854 | /* END CSTYLED */ |
c28b2279 | 3855 | #endif |