/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_dbuf.h>
#include <sys/callb.h>
#include <sys/abd.h>

struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	boolean_t dh_fail_sparse;
	boolean_t dh_fail_uncached;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, boolean_t fail_sparse,
    boolean_t fail_uncached,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

uint_t zfs_dbuf_evict_key;
/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive. A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t dbuf_cache;
static refcount_t dbuf_cache_size;
unsigned long dbuf_cache_max_bytes = 100 * 1024 * 1024;

/* Cap the size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_max_shift = 5;

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
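
/*
 * Worked example (illustrative numbers, not part of the original source):
 * if the cap applied in dbuf_init() works out to dbuf_cache_max_bytes =
 * 100MB and both percentages below are left at 10, the eviction thread is
 * signalled once the cache exceeds 100MB, evicts down to the 90MB low
 * water mark, and callers start evicting directly once the cache crosses
 * 110MB.
 */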

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

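/*
 * Hash a dbuf's identity (objset pointer, object number, level, blkid)
 * down to a 64-bit value using the ZFS CRC64 table; callers mask the
 * result with hash_table_mask to pick a bucket. The final xor below
 * folds in higher-order bits of each field that the byte-wide CRC
 * steps do not consume.
 */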
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * Invoke the callback from a taskq to avoid lock order reversals
	 * and limit stack depth.
	 */
	taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
	    &dbu->dbu_tqent);
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(&dbuf_cache, idx);
	dmu_buf_impl_t *db;
	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static void
dbuf_evict_thread(void)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_sig_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
		boolean_t evict_now = B_FALSE;

		mutex_enter(&dbuf_evict_lock);
		if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
			evict_now = dbuf_cache_above_hiwater();
			cv_signal(&dbuf_evict_cv);
		}
		mutex_exit(&dbuf_evict_lock);

		if (evict_now) {
			dbuf_evict_one();
		}
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
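	/*
	 * For example (illustrative numbers): on a machine with 4GB of
	 * physical memory and the default 8K average block size, the
	 * loop below settles on hsize = 512K entries, i.e. a 4MB table
	 * of 8-byte pointers.
	 */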
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * Setup the parameters for the dbuf cache. We cap the size of the
	 * dbuf cache to 1/32nd (default) of the size of the ARC.
	 */
	dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
	    arc_max_bytes() >> dbuf_cache_max_shift);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	multilist_create(&dbuf_cache, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_cache_link),
	    zfs_arc_num_sublists_per_state,
	    dbuf_cache_multilist_index_func);
	refcount_create(&dbuf_cache_size);

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(&dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but the buffer has nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				int i;
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
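/*
 * Worked example (illustrative): with 128K data blocks (datablkshift = 17)
 * and 16K indirect blocks (indblkshift = 14, i.e. 128 block pointers each,
 * since SPA_BLKPTRSHIFT is 7), the level 1 block covering offset 64M is
 * blkid 64M >> (17 + 1 * 7) = 4; equivalently, level 0 blkid 512 divided
 * by the 128 block pointers per indirect block.
 */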
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		arc_buf_destroy(buf, db);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		/*
		 * The bonus length stored in the dnode may be less than
		 * the maximum available space in the bonus buffer.
		 */
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
		arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
		if (bonuslen < max_bonuslen)
			bzero(db->db.db_data, max_bonuslen);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			int i;
			for (i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);

	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them
 * in the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for
 * the first time in a txg, when we are freeing a range in a dnode that
 * includes this buffer, and when we are accessing a buffer which was
 * received compressed and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
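/*
 * For example (illustrative scenario): if a dbuf was dirtied in txg 12 and
 * is dirtied again in txg 13 while txg 12 is still syncing, the txg 12
 * dirty record must be given its own copy of the data below, so the new
 * open-context write cannot corrupt the image being written out.
 */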
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it references the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
		 * read the data. This could happen during the "zfs receive" of
		 * a stream which is compressed and deduplicated.
		 */
		if (db->db_buf != NULL &&
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) {
			dbuf_fix_old_data(db,
			    spa_syncing_txg(dmu_objset_spa(db->db_objset)));
			err = arc_decompress(db->db_buf);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr))
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

		err = dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (!err && prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!err && !havepzio && zio != NULL)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
34dc7c2f | 1331 | void |
8951cb8d AR |
1332 | dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, |
1333 | dmu_tx_t *tx) | |
34dc7c2f | 1334 | { |
0c66c32d JG |
1335 | dmu_buf_impl_t *db_search; |
1336 | dmu_buf_impl_t *db, *db_next; | |
34dc7c2f | 1337 | uint64_t txg = tx->tx_txg; |
8951cb8d | 1338 | avl_index_t where; |
4254acb0 | 1339 | boolean_t freespill = |
8951cb8d AR |
1340 | (start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID); |
1341 | ||
1342 | if (end_blkid > dn->dn_maxblkid && !freespill) | |
1343 | end_blkid = dn->dn_maxblkid; | |
1344 | dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); | |
34dc7c2f | 1345 | |
0c66c32d | 1346 | db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); |
8951cb8d AR |
1347 | db_search->db_level = 0; |
1348 | db_search->db_blkid = start_blkid; | |
9925c28c | 1349 | db_search->db_state = DB_SEARCH; |
ea97f8ce | 1350 | |
b663a23d | 1351 | mutex_enter(&dn->dn_dbufs_mtx); |
8951cb8d | 1352 | if (start_blkid >= dn->dn_unlisted_l0_blkid && !freespill) { |
b663a23d | 1353 | /* There can't be any dbufs in this range; no need to search. */ |
8951cb8d AR |
1354 | #ifdef DEBUG |
1355 | db = avl_find(&dn->dn_dbufs, db_search, &where); | |
1356 | ASSERT3P(db, ==, NULL); | |
1357 | db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); | |
1358 | ASSERT(db == NULL || db->db_level > 0); | |
1359 | #endif | |
1360 | goto out; | |
b663a23d | 1361 | } else if (dmu_objset_is_receiving(dn->dn_objset)) { |
ea97f8ce | 1362 | /* |
b663a23d MA |
1363 | * If we are receiving, we expect there to be no dbufs in |
1364 | * the range to be freed, because receive modifies each | |
1365 | * block at most once, and in offset order. If this is | |
1366 | * not the case, it can lead to performance problems, | |
1367 | * so note that we unexpectedly took the slow path. | |
ea97f8ce | 1368 | */ |
b663a23d | 1369 | atomic_inc_64(&zfs_free_range_recv_miss); |
ea97f8ce MA |
1370 | } |
1371 | ||
8951cb8d AR |
1372 | db = avl_find(&dn->dn_dbufs, db_search, &where); |
1373 | ASSERT3P(db, ==, NULL); | |
1374 | db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); | |
1375 | ||
1376 | for (; db != NULL; db = db_next) { | |
1377 | db_next = AVL_NEXT(&dn->dn_dbufs, db); | |
428870ff | 1378 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
b128c09f | 1379 | |
8951cb8d AR |
1380 | if (db->db_level != 0 || db->db_blkid > end_blkid) { |
1381 | break; | |
1382 | } | |
1383 | ASSERT3U(db->db_blkid, >=, start_blkid); | |
34dc7c2f BB |
1384 | |
1385 | /* found a level 0 buffer in the range */ | |
13fe0198 MA |
1386 | mutex_enter(&db->db_mtx); |
1387 | if (dbuf_undirty(db, tx)) { | |
1388 | /* mutex has been dropped and dbuf destroyed */ | |
34dc7c2f | 1389 | continue; |
13fe0198 | 1390 | } |
34dc7c2f | 1391 | |
34dc7c2f | 1392 | if (db->db_state == DB_UNCACHED || |
b128c09f | 1393 | db->db_state == DB_NOFILL || |
34dc7c2f BB |
1394 | db->db_state == DB_EVICTING) { |
1395 | ASSERT(db->db.db_data == NULL); | |
1396 | mutex_exit(&db->db_mtx); | |
1397 | continue; | |
1398 | } | |
1399 | if (db->db_state == DB_READ || db->db_state == DB_FILL) { | |
1400 | /* will be handled in dbuf_read_done or dbuf_rele */ | |
1401 | db->db_freed_in_flight = TRUE; | |
1402 | mutex_exit(&db->db_mtx); | |
1403 | continue; | |
1404 | } | |
1405 | if (refcount_count(&db->db_holds) == 0) { | |
1406 | ASSERT(db->db_buf); | |
d3c2ae1c | 1407 | dbuf_destroy(db); |
34dc7c2f BB |
1408 | continue; |
1409 | } | |
1410 | /* The dbuf is referenced */ | |
1411 | ||
1412 | if (db->db_last_dirty != NULL) { | |
1413 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
1414 | ||
1415 | if (dr->dr_txg == txg) { | |
1416 | /* | |
1417 | * This buffer is "in-use"; re-adjust the file | |
1418 | * size to reflect that this buffer may | |
1419 | * contain new data when we sync. | |
1420 | */ | |
428870ff BB |
1421 | if (db->db_blkid != DMU_SPILL_BLKID && |
1422 | db->db_blkid > dn->dn_maxblkid) | |
34dc7c2f BB |
1423 | dn->dn_maxblkid = db->db_blkid; |
1424 | dbuf_unoverride(dr); | |
1425 | } else { | |
1426 | /* | |
1427 | * This dbuf is not dirty in the open context. | |
1428 | * Either uncache it (if it's not referenced in | |
1429 | * the open context) or reset its contents to | |
1430 | * empty. | |
1431 | */ | |
1432 | dbuf_fix_old_data(db, txg); | |
1433 | } | |
1434 | } | |
1435 | /* clear the contents if it's cached */ | |
1436 | if (db->db_state == DB_CACHED) { | |
1437 | ASSERT(db->db.db_data != NULL); | |
1438 | arc_release(db->db_buf, db); | |
1439 | bzero(db->db.db_data, db->db.db_size); | |
1440 | arc_buf_freeze(db->db_buf); | |
1441 | } | |
1442 | ||
1443 | mutex_exit(&db->db_mtx); | |
1444 | } | |
8951cb8d AR |
1445 | |
1446 | out: | |
1447 | kmem_free(db_search, sizeof (dmu_buf_impl_t)); | |
34dc7c2f BB |
1448 | mutex_exit(&dn->dn_dbufs_mtx); |
1449 | } | |
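/*
 * Illustrative call pattern (a sketch, not an additional in-tree caller;
 * first_blkid and last_blkid are hypothetical names): given a held dnode
 * and an assigned transaction, level-0 blocks in [first_blkid, last_blkid]
 * are freed with
 *
 *	dbuf_free_range(dn, first_blkid, last_blkid, tx);
 *
 * dbuf_rm_spill() below uses this same entry point with DMU_SPILL_BLKID
 * as both endpoints to take the freespill path.
 */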
1450 | ||
1451 | static int | |
1452 | dbuf_block_freeable(dmu_buf_impl_t *db) | |
1453 | { | |
1454 | dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; | |
1455 | uint64_t birth_txg = 0; | |
1456 | ||
1457 | /* | |
1458 | * We don't need any locking to protect db_blkptr: | |
1459 | * If it's syncing, then db_last_dirty will be set | |
1460 | * so we'll ignore db_blkptr. | |
b0bc7a84 MG |
1461 | * |
1462 | * This logic ensures that only block births for | |
1463 | * filled blocks are considered. | |
34dc7c2f BB |
1464 | */ |
1465 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
b0bc7a84 MG |
1466 | if (db->db_last_dirty && (db->db_blkptr == NULL || |
1467 | !BP_IS_HOLE(db->db_blkptr))) { | |
34dc7c2f | 1468 | birth_txg = db->db_last_dirty->dr_txg; |
b0bc7a84 | 1469 | } else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { |
34dc7c2f | 1470 | birth_txg = db->db_blkptr->blk_birth; |
b0bc7a84 | 1471 | } |
34dc7c2f | 1472 | |
572e2857 | 1473 | /* |
b0bc7a84 | 1474 | * If this block doesn't exist or is in a snapshot, it can't be freed. |
572e2857 BB |
1475 | * Don't pass the bp to dsl_dataset_block_freeable() since we |
1476 | * are holding the db_mtx lock and might deadlock if we are | |
1477 | * prefetching a dedup-ed block. | |
1478 | */ | |
b0bc7a84 | 1479 | if (birth_txg != 0) |
34dc7c2f | 1480 | return (ds == NULL || |
572e2857 | 1481 | dsl_dataset_block_freeable(ds, NULL, birth_txg)); |
34dc7c2f | 1482 | else |
b0bc7a84 | 1483 | return (B_FALSE); |
34dc7c2f BB |
1484 | } |
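/*
 * To restate the logic above: a dirty buffer takes its birth txg from
 * the newest dirty record (unless the on-disk bp is an explicit hole),
 * an unmodified buffer takes it from a non-hole bp's blk_birth, and a
 * birth_txg of zero (a hole, or a block that was never written) always
 * yields B_FALSE; everything else defers to dsl_dataset_block_freeable().
 */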
1485 | ||
1486 | void | |
1487 | dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) | |
1488 | { | |
1489 | arc_buf_t *buf, *obuf; | |
1490 | int osize = db->db.db_size; | |
1491 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
572e2857 | 1492 | dnode_t *dn; |
34dc7c2f | 1493 | |
428870ff | 1494 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 1495 | |
572e2857 BB |
1496 | DB_DNODE_ENTER(db); |
1497 | dn = DB_DNODE(db); | |
1498 | ||
34dc7c2f | 1499 | /* XXX does *this* func really need the lock? */ |
572e2857 | 1500 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); |
34dc7c2f BB |
1501 | |
1502 | /* | |
b0bc7a84 | 1503 | * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held |
34dc7c2f BB |
1504 | * is OK, because there can be no other references to the db |
1505 | * when we are changing its size, so no concurrent DB_FILL can | |
1506 | * be happening. | |
1507 | */ | |
1508 | /* | |
1509 | * XXX we should be doing a dbuf_read, checking the return | |
1510 | * value and returning that up to our callers | |
1511 | */ | |
b0bc7a84 | 1512 | dmu_buf_will_dirty(&db->db, tx); |
34dc7c2f BB |
1513 | |
1514 | /* create the data buffer for the new block */ | |
2aa34383 | 1515 | buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); |
34dc7c2f BB |
1516 | |
1517 | /* copy old block data to the new block */ | |
1518 | obuf = db->db_buf; | |
1519 | bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); | |
1520 | /* zero the remainder */ | |
1521 | if (size > osize) | |
1522 | bzero((uint8_t *)buf->b_data + osize, size - osize); | |
1523 | ||
1524 | mutex_enter(&db->db_mtx); | |
1525 | dbuf_set_data(db, buf); | |
d3c2ae1c | 1526 | arc_buf_destroy(obuf, db); |
34dc7c2f BB |
1527 | db->db.db_size = size; |
1528 | ||
1529 | if (db->db_level == 0) { | |
1530 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
1531 | db->db_last_dirty->dt.dl.dr_data = buf; | |
1532 | } | |
1533 | mutex_exit(&db->db_mtx); | |
1534 | ||
572e2857 BB |
1535 | dnode_willuse_space(dn, size-osize, tx); |
1536 | DB_DNODE_EXIT(db); | |
34dc7c2f BB |
1537 | } |
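/*
 * Note that dbuf_new_size() dirties the buffer before swapping buffers,
 * so the freshly allocated arc buf both replaces db->db_buf and becomes
 * the level-0 dirty record's dr_data; the old buffer is destroyed only
 * after its contents have been copied.
 */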
1538 | ||
428870ff BB |
1539 | void |
1540 | dbuf_release_bp(dmu_buf_impl_t *db) | |
1541 | { | |
b0bc7a84 | 1542 | ASSERTV(objset_t *os = db->db_objset); |
428870ff BB |
1543 | |
1544 | ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); | |
1545 | ASSERT(arc_released(os->os_phys_buf) || | |
1546 | list_link_active(&os->os_dsl_dataset->ds_synced_link)); | |
1547 | ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); | |
1548 | ||
294f6806 | 1549 | (void) arc_release(db->db_buf, db); |
428870ff BB |
1550 | } |
1551 | ||
5a28a973 MA |
1552 | /* |
1553 | * We already have a dirty record for this TXG, and we are being | |
1554 | * dirtied again. | |
1555 | */ | |
1556 | static void | |
1557 | dbuf_redirty(dbuf_dirty_record_t *dr) | |
1558 | { | |
1559 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
1560 | ||
1561 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1562 | ||
1563 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { | |
1564 | /* | |
1565 | * If this buffer has already been written out, | |
1566 | * we now need to reset its state. | |
1567 | */ | |
1568 | dbuf_unoverride(dr); | |
1569 | if (db->db.db_object != DMU_META_DNODE_OBJECT && | |
1570 | db->db_state != DB_NOFILL) { | |
1571 | /* Already released on initial dirty, so just thaw. */ | |
1572 | ASSERT(arc_released(db->db_buf)); | |
1573 | arc_buf_thaw(db->db_buf); | |
1574 | } | |
1575 | } | |
1576 | } | |
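/*
 * dbuf_redirty() is reached from dbuf_dirty() and dmu_buf_will_dirty()
 * when a dirty record for this txg already exists; all it must do is
 * discard any write override and thaw the arc buf so the data can be
 * modified again.
 */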
1577 | ||
34dc7c2f BB |
1578 | dbuf_dirty_record_t * |
1579 | dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
1580 | { | |
572e2857 BB |
1581 | dnode_t *dn; |
1582 | objset_t *os; | |
34dc7c2f BB |
1583 | dbuf_dirty_record_t **drp, *dr; |
1584 | int drop_struct_lock = FALSE; | |
b128c09f | 1585 | boolean_t do_free_accounting = B_FALSE; |
34dc7c2f BB |
1586 | int txgoff = tx->tx_txg & TXG_MASK; |
1587 | ||
1588 | ASSERT(tx->tx_txg != 0); | |
1589 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1590 | DMU_TX_DIRTY_BUF(tx, db); | |
1591 | ||
572e2857 BB |
1592 | DB_DNODE_ENTER(db); |
1593 | dn = DB_DNODE(db); | |
34dc7c2f BB |
1594 | /* |
1595 | * Shouldn't dirty a regular buffer in syncing context. Private | |
1596 | * objects may be dirtied in syncing context, but only if they | |
1597 | * were already pre-dirtied in open context. | |
34dc7c2f BB |
1598 | */ |
1599 | ASSERT(!dmu_tx_is_syncing(tx) || | |
1600 | BP_IS_HOLE(dn->dn_objset->os_rootbp) || | |
9babb374 BB |
1601 | DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
1602 | dn->dn_objset->os_dsl_dataset == NULL); | |
34dc7c2f BB |
1603 | /* |
1604 | * We make this assert for private objects as well, but after we | |
1605 | * check if we're already dirty. They are allowed to re-dirty | |
1606 | * in syncing context. | |
1607 | */ | |
1608 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
1609 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
1610 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); | |
1611 | ||
1612 | mutex_enter(&db->db_mtx); | |
1613 | /* | |
1614 | * XXX make this true for indirects too? The problem is that | |
1615 | * transactions created with dmu_tx_create_assigned() from | |
1616 | * syncing context don't bother holding ahead. | |
1617 | */ | |
1618 | ASSERT(db->db_level != 0 || | |
b128c09f BB |
1619 | db->db_state == DB_CACHED || db->db_state == DB_FILL || |
1620 | db->db_state == DB_NOFILL); | |
34dc7c2f BB |
1621 | |
1622 | mutex_enter(&dn->dn_mtx); | |
1623 | /* | |
1624 | * Don't set dirtyctx to SYNC if we're just modifying this as we | |
1625 | * initialize the objset. | |
1626 | */ | |
1627 | if (dn->dn_dirtyctx == DN_UNDIRTIED && | |
1628 | !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { | |
1629 | dn->dn_dirtyctx = | |
1630 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); | |
1631 | ASSERT(dn->dn_dirtyctx_firstset == NULL); | |
79c76d5b | 1632 | dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); |
34dc7c2f BB |
1633 | } |
1634 | mutex_exit(&dn->dn_mtx); | |
1635 | ||
428870ff BB |
1636 | if (db->db_blkid == DMU_SPILL_BLKID) |
1637 | dn->dn_have_spill = B_TRUE; | |
1638 | ||
34dc7c2f BB |
1639 | /* |
1640 | * If this buffer is already dirty, we're done. | |
1641 | */ | |
1642 | drp = &db->db_last_dirty; | |
1643 | ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || | |
1644 | db->db.db_object == DMU_META_DNODE_OBJECT); | |
1645 | while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) | |
1646 | drp = &dr->dr_next; | |
1647 | if (dr && dr->dr_txg == tx->tx_txg) { | |
572e2857 BB |
1648 | DB_DNODE_EXIT(db); |
1649 | ||
5a28a973 | 1650 | dbuf_redirty(dr); |
34dc7c2f BB |
1651 | mutex_exit(&db->db_mtx); |
1652 | return (dr); | |
1653 | } | |
1654 | ||
1655 | /* | |
1656 | * Only valid if not already dirty. | |
1657 | */ | |
9babb374 BB |
1658 | ASSERT(dn->dn_object == 0 || |
1659 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
34dc7c2f BB |
1660 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); |
1661 | ||
1662 | ASSERT3U(dn->dn_nlevels, >, db->db_level); | |
1663 | ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || | |
1664 | dn->dn_phys->dn_nlevels > db->db_level || | |
1665 | dn->dn_next_nlevels[txgoff] > db->db_level || | |
1666 | dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || | |
1667 | dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); | |
1668 | ||
1669 | /* | |
1670 | * We should only be dirtying in syncing context if it's the | |
9babb374 BB |
1671 | * mos or we're initializing the os or it's a special object. |
1672 | * However, we are allowed to dirty in syncing context provided | |
1673 | * we already dirtied it in open context. Hence we must make | |
1674 | * this assertion only if we're not already dirty. | |
34dc7c2f | 1675 | */ |
572e2857 | 1676 | os = dn->dn_objset; |
9babb374 BB |
1677 | ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
1678 | os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); | |
34dc7c2f BB |
1679 | ASSERT(db->db.db_size != 0); |
1680 | ||
1681 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); | |
1682 | ||
428870ff | 1683 | if (db->db_blkid != DMU_BONUS_BLKID) { |
34dc7c2f BB |
1684 | /* |
1685 | * Update the accounting. | |
b128c09f BB |
1686 | * Note: we delay "free accounting" until after we drop |
1687 | * the db_mtx. This keeps us from grabbing other locks | |
428870ff | 1688 | * (and possibly deadlocking) in bp_get_dsize() while |
b128c09f | 1689 | * also holding the db_mtx. |
34dc7c2f | 1690 | */ |
34dc7c2f | 1691 | dnode_willuse_space(dn, db->db.db_size, tx); |
b128c09f | 1692 | do_free_accounting = dbuf_block_freeable(db); |
34dc7c2f BB |
1693 | } |
1694 | ||
1695 | /* | |
1696 | * If this buffer is dirty in an old transaction group we need | |
1697 | * to make a copy of it so that the changes we make in this | |
1698 | * transaction group won't leak out when we sync the older txg. | |
1699 | */ | |
79c76d5b | 1700 | dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); |
98f72a53 | 1701 | list_link_init(&dr->dr_dirty_node); |
34dc7c2f BB |
1702 | if (db->db_level == 0) { |
1703 | void *data_old = db->db_buf; | |
1704 | ||
b128c09f | 1705 | if (db->db_state != DB_NOFILL) { |
428870ff | 1706 | if (db->db_blkid == DMU_BONUS_BLKID) { |
b128c09f BB |
1707 | dbuf_fix_old_data(db, tx->tx_txg); |
1708 | data_old = db->db.db_data; | |
1709 | } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { | |
1710 | /* | |
1711 | * Release the data buffer from the cache so | |
1712 | * that we can modify it without impacting | |
1713 | * possible other users of this cached data | |
1714 | * block. Note that indirect blocks and | |
1715 | * private objects are not released until the | |
1716 | * syncing state (since they are only modified | |
1717 | * then). | |
1718 | */ | |
1719 | arc_release(db->db_buf, db); | |
1720 | dbuf_fix_old_data(db, tx->tx_txg); | |
1721 | data_old = db->db_buf; | |
1722 | } | |
1723 | ASSERT(data_old != NULL); | |
34dc7c2f | 1724 | } |
34dc7c2f BB |
1725 | dr->dt.dl.dr_data = data_old; |
1726 | } else { | |
448d7aaa | 1727 | mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); |
34dc7c2f BB |
1728 | list_create(&dr->dt.di.dr_children, |
1729 | sizeof (dbuf_dirty_record_t), | |
1730 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
1731 | } | |
e8b96c60 MA |
1732 | if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) |
1733 | dr->dr_accounted = db->db.db_size; | |
34dc7c2f BB |
1734 | dr->dr_dbuf = db; |
1735 | dr->dr_txg = tx->tx_txg; | |
1736 | dr->dr_next = *drp; | |
1737 | *drp = dr; | |
1738 | ||
1739 | /* | |
1740 | * We could have been freed_in_flight between the dbuf_noread | |
1741 | * and dbuf_dirty. We win, as though the dbuf_noread() had | |
1742 | * happened after the free. | |
1743 | */ | |
428870ff BB |
1744 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && |
1745 | db->db_blkid != DMU_SPILL_BLKID) { | |
34dc7c2f | 1746 | mutex_enter(&dn->dn_mtx); |
9bd274dd MA |
1747 | if (dn->dn_free_ranges[txgoff] != NULL) { |
1748 | range_tree_clear(dn->dn_free_ranges[txgoff], | |
1749 | db->db_blkid, 1); | |
1750 | } | |
34dc7c2f BB |
1751 | mutex_exit(&dn->dn_mtx); |
1752 | db->db_freed_in_flight = FALSE; | |
1753 | } | |
1754 | ||
1755 | /* | |
1756 | * This buffer is now part of this txg | |
1757 | */ | |
1758 | dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); | |
1759 | db->db_dirtycnt += 1; | |
1760 | ASSERT3U(db->db_dirtycnt, <=, 3); | |
1761 | ||
1762 | mutex_exit(&db->db_mtx); | |
1763 | ||
428870ff BB |
1764 | if (db->db_blkid == DMU_BONUS_BLKID || |
1765 | db->db_blkid == DMU_SPILL_BLKID) { | |
34dc7c2f BB |
1766 | mutex_enter(&dn->dn_mtx); |
1767 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1768 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
1769 | mutex_exit(&dn->dn_mtx); | |
1770 | dnode_setdirty(dn, tx); | |
572e2857 | 1771 | DB_DNODE_EXIT(db); |
34dc7c2f | 1772 | return (dr); |
98ace739 MA |
1773 | } |
1774 | ||
1775 | /* | |
1776 | * The dn_struct_rwlock prevents db_blkptr from changing | |
1777 | * due to a write from syncing context completing | |
1778 | * while we are running, so we want to acquire it before | |
1779 | * looking at db_blkptr. | |
1780 | */ | |
1781 | if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { | |
1782 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
1783 | drop_struct_lock = TRUE; | |
1784 | } | |
1785 | ||
1786 | if (do_free_accounting) { | |
b128c09f BB |
1787 | blkptr_t *bp = db->db_blkptr; |
1788 | int64_t willfree = (bp && !BP_IS_HOLE(bp)) ? | |
428870ff | 1789 | bp_get_dsize(os->os_spa, bp) : db->db.db_size; |
b128c09f BB |
1790 | /* |
1791 | * This is only a guess -- if the dbuf is dirty | |
1792 | * in a previous txg, we don't know how much | |
1793 | * space it will use on disk yet. We should | |
1794 | * really have the struct_rwlock to access | |
1795 | * db_blkptr, but since this is just a guess, | |
1796 | * it's OK if we get an odd answer. | |
1797 | */ | |
572e2857 | 1798 | ddt_prefetch(os->os_spa, bp); |
b128c09f | 1799 | dnode_willuse_space(dn, -willfree, tx); |
34dc7c2f BB |
1800 | } |
1801 | ||
b128c09f BB |
1802 | if (db->db_level == 0) { |
1803 | dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); | |
1804 | ASSERT(dn->dn_maxblkid >= db->db_blkid); | |
1805 | } | |
1806 | ||
34dc7c2f BB |
1807 | if (db->db_level+1 < dn->dn_nlevels) { |
1808 | dmu_buf_impl_t *parent = db->db_parent; | |
1809 | dbuf_dirty_record_t *di; | |
1810 | int parent_held = FALSE; | |
1811 | ||
1812 | if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { | |
1813 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1814 | ||
1815 | parent = dbuf_hold_level(dn, db->db_level+1, | |
1816 | db->db_blkid >> epbs, FTAG); | |
428870ff | 1817 | ASSERT(parent != NULL); |
34dc7c2f BB |
1818 | parent_held = TRUE; |
1819 | } | |
1820 | if (drop_struct_lock) | |
1821 | rw_exit(&dn->dn_struct_rwlock); | |
1822 | ASSERT3U(db->db_level+1, ==, parent->db_level); | |
1823 | di = dbuf_dirty(parent, tx); | |
1824 | if (parent_held) | |
1825 | dbuf_rele(parent, FTAG); | |
1826 | ||
1827 | mutex_enter(&db->db_mtx); | |
e8b96c60 MA |
1828 | /* |
1829 | * Since we've dropped the mutex, it's possible that | |
1830 | * dbuf_undirty() might have changed this out from under us. | |
1831 | */ | |
34dc7c2f BB |
1832 | if (db->db_last_dirty == dr || |
1833 | dn->dn_object == DMU_META_DNODE_OBJECT) { | |
1834 | mutex_enter(&di->dt.di.dr_mtx); | |
1835 | ASSERT3U(di->dr_txg, ==, tx->tx_txg); | |
1836 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1837 | list_insert_tail(&di->dt.di.dr_children, dr); | |
1838 | mutex_exit(&di->dt.di.dr_mtx); | |
1839 | dr->dr_parent = di; | |
1840 | } | |
1841 | mutex_exit(&db->db_mtx); | |
1842 | } else { | |
1843 | ASSERT(db->db_level+1 == dn->dn_nlevels); | |
1844 | ASSERT(db->db_blkid < dn->dn_nblkptr); | |
572e2857 | 1845 | ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
1846 | mutex_enter(&dn->dn_mtx); |
1847 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1848 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
1849 | mutex_exit(&dn->dn_mtx); | |
1850 | if (drop_struct_lock) | |
1851 | rw_exit(&dn->dn_struct_rwlock); | |
1852 | } | |
1853 | ||
1854 | dnode_setdirty(dn, tx); | |
572e2857 | 1855 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
1856 | return (dr); |
1857 | } | |
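/*
 * Callers normally reach dbuf_dirty() through the dmu_buf_will_*()
 * wrappers below, which first put the buffer into a dirtyable state.
 * From dmu_buf_will_dirty() and dmu_buf_will_fill() respectively:
 *
 *	(void) dbuf_read(db, NULL, rf);
 *	(void) dbuf_dirty(db, tx);
 *
 *	dbuf_noread(db);
 *	(void) dbuf_dirty(db, tx);
 */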
1858 | ||
13fe0198 | 1859 | /* |
e49f1e20 WA |
1860 | * Undirty a buffer in the transaction group referenced by the given |
1861 | * transaction. Return whether this evicted the dbuf. | |
13fe0198 MA |
1862 | */ |
1863 | static boolean_t | |
34dc7c2f BB |
1864 | dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
1865 | { | |
572e2857 | 1866 | dnode_t *dn; |
34dc7c2f BB |
1867 | uint64_t txg = tx->tx_txg; |
1868 | dbuf_dirty_record_t *dr, **drp; | |
1869 | ||
1870 | ASSERT(txg != 0); | |
4bda3bd0 MA |
1871 | |
1872 | /* | |
1873 | * Due to our use of dn_nlevels below, this can only be called | |
1874 | * in open context, unless we are operating on the MOS. | |
1875 | * From syncing context, dn_nlevels may be different from the | |
1876 | * dn_nlevels used when dbuf was dirtied. | |
1877 | */ | |
1878 | ASSERT(db->db_objset == | |
1879 | dmu_objset_pool(db->db_objset)->dp_meta_objset || | |
1880 | txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); | |
428870ff | 1881 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
13fe0198 MA |
1882 | ASSERT0(db->db_level); |
1883 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
34dc7c2f | 1884 | |
34dc7c2f BB |
1885 | /* |
1886 | * If this buffer is not dirty, we're done. | |
1887 | */ | |
1888 | for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) | |
1889 | if (dr->dr_txg <= txg) | |
1890 | break; | |
13fe0198 MA |
1891 | if (dr == NULL || dr->dr_txg < txg) |
1892 | return (B_FALSE); | |
34dc7c2f | 1893 | ASSERT(dr->dr_txg == txg); |
428870ff | 1894 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f | 1895 | |
572e2857 BB |
1896 | DB_DNODE_ENTER(db); |
1897 | dn = DB_DNODE(db); | |
1898 | ||
34dc7c2f BB |
1899 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); |
1900 | ||
1901 | ASSERT(db->db.db_size != 0); | |
1902 | ||
4bda3bd0 MA |
1903 | dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), |
1904 | dr->dr_accounted, txg); | |
34dc7c2f BB |
1905 | |
1906 | *drp = dr->dr_next; | |
1907 | ||
ef3c1dea GR |
1908 | /* |
1909 | * Note that there are three places in dbuf_dirty() | |
1910 | * where this dirty record may be put on a list. | |
1911 | * Make sure to do a list_remove corresponding to | |
1912 | * every one of those list_insert calls. | |
1913 | */ | |
34dc7c2f BB |
1914 | if (dr->dr_parent) { |
1915 | mutex_enter(&dr->dr_parent->dt.di.dr_mtx); | |
1916 | list_remove(&dr->dr_parent->dt.di.dr_children, dr); | |
1917 | mutex_exit(&dr->dr_parent->dt.di.dr_mtx); | |
ef3c1dea | 1918 | } else if (db->db_blkid == DMU_SPILL_BLKID || |
4bda3bd0 | 1919 | db->db_level + 1 == dn->dn_nlevels) { |
b128c09f | 1920 | ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
1921 | mutex_enter(&dn->dn_mtx); |
1922 | list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); | |
1923 | mutex_exit(&dn->dn_mtx); | |
1924 | } | |
572e2857 | 1925 | DB_DNODE_EXIT(db); |
34dc7c2f | 1926 | |
13fe0198 MA |
1927 | if (db->db_state != DB_NOFILL) { |
1928 | dbuf_unoverride(dr); | |
34dc7c2f | 1929 | |
34dc7c2f | 1930 | ASSERT(db->db_buf != NULL); |
13fe0198 MA |
1931 | ASSERT(dr->dt.dl.dr_data != NULL); |
1932 | if (dr->dt.dl.dr_data != db->db_buf) | |
d3c2ae1c | 1933 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
34dc7c2f | 1934 | } |
58c4aa00 | 1935 | |
34dc7c2f BB |
1936 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); |
1937 | ||
1938 | ASSERT(db->db_dirtycnt > 0); | |
1939 | db->db_dirtycnt -= 1; | |
1940 | ||
1941 | if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { | |
d3c2ae1c GW |
1942 | ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); |
1943 | dbuf_destroy(db); | |
13fe0198 | 1944 | return (B_TRUE); |
34dc7c2f BB |
1945 | } |
1946 | ||
13fe0198 | 1947 | return (B_FALSE); |
34dc7c2f BB |
1948 | } |
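/*
 * Note that dbuf_undirty() both unlinks the dirty record from whichever
 * list dbuf_dirty() put it on and drops the hold that dbuf_dirty() took
 * for the txg; when that was the last hold the dbuf is destroyed and
 * B_TRUE is returned, which is how dbuf_free_range() above knows that
 * db_mtx is already gone.
 */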
1949 | ||
34dc7c2f | 1950 | void |
b0bc7a84 | 1951 | dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) |
34dc7c2f | 1952 | { |
b0bc7a84 | 1953 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
34dc7c2f | 1954 | int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; |
5a28a973 | 1955 | dbuf_dirty_record_t *dr; |
34dc7c2f BB |
1956 | |
1957 | ASSERT(tx->tx_txg != 0); | |
1958 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1959 | ||
5a28a973 MA |
1960 | /* |
1961 | * Quick check for dirtiness. For already dirty blocks, this | |
1962 | * reduces the runtime of this function by >90%, and improves | |
1963 | * overall performance by 50% for some workloads (e.g. file deletion | |
1964 | * with indirect blocks cached). | |
1965 | */ | |
1966 | mutex_enter(&db->db_mtx); | |
1967 | ||
1968 | for (dr = db->db_last_dirty; | |
1969 | dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { | |
1970 | /* | |
1971 | * It's possible that it is already dirty but not cached, | |
1972 | * because there are some calls to dbuf_dirty() that don't | |
1973 | * go through dmu_buf_will_dirty(). | |
1974 | */ | |
1975 | if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { | |
1976 | /* This dbuf is already dirty and cached. */ | |
1977 | dbuf_redirty(dr); | |
1978 | mutex_exit(&db->db_mtx); | |
1979 | return; | |
1980 | } | |
1981 | } | |
1982 | mutex_exit(&db->db_mtx); | |
1983 | ||
572e2857 BB |
1984 | DB_DNODE_ENTER(db); |
1985 | if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) | |
34dc7c2f | 1986 | rf |= DB_RF_HAVESTRUCT; |
572e2857 | 1987 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
1988 | (void) dbuf_read(db, NULL, rf); |
1989 | (void) dbuf_dirty(db, tx); | |
1990 | } | |
1991 | ||
b128c09f BB |
1992 | void |
1993 | dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
1994 | { | |
1995 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
1996 | ||
1997 | db->db_state = DB_NOFILL; | |
1998 | ||
1999 | dmu_buf_will_fill(db_fake, tx); | |
2000 | } | |
2001 | ||
34dc7c2f BB |
2002 | void |
2003 | dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2004 | { | |
2005 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2006 | ||
428870ff | 2007 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2008 | ASSERT(tx->tx_txg != 0); |
2009 | ASSERT(db->db_level == 0); | |
2010 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
2011 | ||
2012 | ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || | |
2013 | dmu_tx_private_ok(tx)); | |
2014 | ||
2015 | dbuf_noread(db); | |
2016 | (void) dbuf_dirty(db, tx); | |
2017 | } | |
2018 | ||
2019 | #pragma weak dmu_buf_fill_done = dbuf_fill_done | |
2020 | /* ARGSUSED */ | |
2021 | void | |
2022 | dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
2023 | { | |
2024 | mutex_enter(&db->db_mtx); | |
2025 | DBUF_VERIFY(db); | |
2026 | ||
2027 | if (db->db_state == DB_FILL) { | |
2028 | if (db->db_level == 0 && db->db_freed_in_flight) { | |
428870ff | 2029 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2030 | /* we were freed while filling */ |
2031 | /* XXX dbuf_undirty? */ | |
2032 | bzero(db->db.db_data, db->db.db_size); | |
2033 | db->db_freed_in_flight = FALSE; | |
2034 | } | |
2035 | db->db_state = DB_CACHED; | |
2036 | cv_broadcast(&db->db_changed); | |
2037 | } | |
2038 | mutex_exit(&db->db_mtx); | |
2039 | } | |
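/*
 * Sketch of the fill protocol (dbuf_assign_arcbuf() below is an in-tree
 * user that finishes with dmu_buf_fill_done()):
 *
 *	dmu_buf_will_fill(dbuf, tx);	(or dmu_buf_will_not_fill())
 *	... populate db->db.db_data ...
 *	dmu_buf_fill_done(dbuf, tx);
 *
 * dbuf_fill_done() completes the DB_FILL -> DB_CACHED transition and
 * broadcasts db_changed to wake any thread waiting for the fill.
 */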
2040 | ||
9b67f605 MA |
2041 | void |
2042 | dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, | |
2043 | bp_embedded_type_t etype, enum zio_compress comp, | |
2044 | int uncompressed_size, int compressed_size, int byteorder, | |
2045 | dmu_tx_t *tx) | |
2046 | { | |
2047 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
2048 | struct dirty_leaf *dl; | |
2049 | dmu_object_type_t type; | |
2050 | ||
241b5415 MA |
2051 | if (etype == BP_EMBEDDED_TYPE_DATA) { |
2052 | ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), | |
2053 | SPA_FEATURE_EMBEDDED_DATA)); | |
2054 | } | |
2055 | ||
9b67f605 MA |
2056 | DB_DNODE_ENTER(db); |
2057 | type = DB_DNODE(db)->dn_type; | |
2058 | DB_DNODE_EXIT(db); | |
2059 | ||
2060 | ASSERT0(db->db_level); | |
2061 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2062 | ||
2063 | dmu_buf_will_not_fill(dbuf, tx); | |
2064 | ||
2065 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
2066 | dl = &db->db_last_dirty->dt.dl; | |
2067 | encode_embedded_bp_compressed(&dl->dr_overridden_by, | |
2068 | data, comp, uncompressed_size, compressed_size); | |
2069 | BPE_SET_ETYPE(&dl->dr_overridden_by, etype); | |
2070 | BP_SET_TYPE(&dl->dr_overridden_by, type); | |
2071 | BP_SET_LEVEL(&dl->dr_overridden_by, 0); | |
2072 | BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); | |
2073 | ||
2074 | dl->dr_override_state = DR_OVERRIDDEN; | |
2075 | dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; | |
2076 | } | |
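/*
 * In effect, dmu_buf_write_embedded() bypasses the normal write path:
 * the already-compressed payload is encoded directly into the block
 * pointer by encode_embedded_bp_compressed(), and the dirty leaf is
 * marked DR_OVERRIDDEN so that syncing context uses dr_overridden_by
 * instead of issuing a write.
 */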
2077 | ||
9babb374 BB |
2078 | /* |
2079 | * Directly assign a provided arc buf to a given dbuf if it's not referenced | |
2080 | * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. | |
2081 | */ | |
2082 | void | |
2083 | dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) | |
2084 | { | |
2085 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
428870ff | 2086 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
9babb374 | 2087 | ASSERT(db->db_level == 0); |
2aa34383 | 2088 | ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); |
9babb374 | 2089 | ASSERT(buf != NULL); |
2aa34383 | 2090 | ASSERT(arc_buf_lsize(buf) == db->db.db_size); |
9babb374 BB |
2091 | ASSERT(tx->tx_txg != 0); |
2092 | ||
2093 | arc_return_buf(buf, db); | |
2094 | ASSERT(arc_released(buf)); | |
2095 | ||
2096 | mutex_enter(&db->db_mtx); | |
2097 | ||
2098 | while (db->db_state == DB_READ || db->db_state == DB_FILL) | |
2099 | cv_wait(&db->db_changed, &db->db_mtx); | |
2100 | ||
2101 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); | |
2102 | ||
2103 | if (db->db_state == DB_CACHED && | |
2104 | refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { | |
2105 | mutex_exit(&db->db_mtx); | |
2106 | (void) dbuf_dirty(db, tx); | |
2107 | bcopy(buf->b_data, db->db.db_data, db->db.db_size); | |
d3c2ae1c | 2108 | arc_buf_destroy(buf, db); |
428870ff | 2109 | xuio_stat_wbuf_copied(); |
9babb374 BB |
2110 | return; |
2111 | } | |
2112 | ||
428870ff | 2113 | xuio_stat_wbuf_nocopy(); |
9babb374 BB |
2114 | if (db->db_state == DB_CACHED) { |
2115 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
2116 | ||
2117 | ASSERT(db->db_buf != NULL); | |
2118 | if (dr != NULL && dr->dr_txg == tx->tx_txg) { | |
2119 | ASSERT(dr->dt.dl.dr_data == db->db_buf); | |
2120 | if (!arc_released(db->db_buf)) { | |
2121 | ASSERT(dr->dt.dl.dr_override_state == | |
2122 | DR_OVERRIDDEN); | |
2123 | arc_release(db->db_buf, db); | |
2124 | } | |
2125 | dr->dt.dl.dr_data = buf; | |
d3c2ae1c | 2126 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2127 | } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { |
2128 | arc_release(db->db_buf, db); | |
d3c2ae1c | 2129 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2130 | } |
2131 | db->db_buf = NULL; | |
2132 | } | |
2133 | ASSERT(db->db_buf == NULL); | |
2134 | dbuf_set_data(db, buf); | |
2135 | db->db_state = DB_FILL; | |
2136 | mutex_exit(&db->db_mtx); | |
2137 | (void) dbuf_dirty(db, tx); | |
b0bc7a84 | 2138 | dmu_buf_fill_done(&db->db, tx); |
9babb374 BB |
2139 | } |
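/*
 * To summarize the two outcomes above: if anyone besides the caller
 * holds the dbuf, the supplied buffer is copied into the existing one
 * and destroyed (the wbuf_copied path); otherwise it is installed
 * directly with dbuf_set_data() and pushed through the normal
 * DB_FILL -> DB_CACHED transition (the wbuf_nocopy path).
 */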
2140 | ||
34dc7c2f | 2141 | void |
d3c2ae1c | 2142 | dbuf_destroy(dmu_buf_impl_t *db) |
34dc7c2f | 2143 | { |
572e2857 | 2144 | dnode_t *dn; |
34dc7c2f | 2145 | dmu_buf_impl_t *parent = db->db_parent; |
572e2857 | 2146 | dmu_buf_impl_t *dndb; |
34dc7c2f BB |
2147 | |
2148 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
2149 | ASSERT(refcount_is_zero(&db->db_holds)); | |
2150 | ||
d3c2ae1c GW |
2151 | if (db->db_buf != NULL) { |
2152 | arc_buf_destroy(db->db_buf, db); | |
2153 | db->db_buf = NULL; | |
2154 | } | |
34dc7c2f | 2155 | |
d3c2ae1c GW |
2156 | if (db->db_blkid == DMU_BONUS_BLKID) { |
2157 | int slots = DB_DNODE(db)->dn_num_slots; | |
2158 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
34dc7c2f | 2159 | ASSERT(db->db.db_data != NULL); |
a3fd9d9e | 2160 | kmem_free(db->db.db_data, bonuslen); |
d3c2ae1c | 2161 | arc_space_return(bonuslen, ARC_SPACE_BONUS); |
34dc7c2f BB |
2162 | db->db_state = DB_UNCACHED; |
2163 | } | |
2164 | ||
d3c2ae1c GW |
2165 | dbuf_clear_data(db); |
2166 | ||
2167 | if (multilist_link_active(&db->db_cache_link)) { | |
2168 | multilist_remove(&dbuf_cache, db); | |
2169 | (void) refcount_remove_many(&dbuf_cache_size, | |
2170 | db->db.db_size, db); | |
2171 | } | |
2172 | ||
b128c09f | 2173 | ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); |
34dc7c2f BB |
2174 | ASSERT(db->db_data_pending == NULL); |
2175 | ||
2176 | db->db_state = DB_EVICTING; | |
2177 | db->db_blkptr = NULL; | |
2178 | ||
d3c2ae1c GW |
2179 | /* |
2180 | * Now that db_state is DB_EVICTING, nobody else can find this via | |
2181 | * the hash table. We can now drop db_mtx, which allows us to | |
2182 | * acquire the dn_dbufs_mtx. | |
2183 | */ | |
2184 | mutex_exit(&db->db_mtx); | |
2185 | ||
572e2857 BB |
2186 | DB_DNODE_ENTER(db); |
2187 | dn = DB_DNODE(db); | |
2188 | dndb = dn->dn_dbuf; | |
d3c2ae1c GW |
2189 | if (db->db_blkid != DMU_BONUS_BLKID) { |
2190 | boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); | |
2191 | if (needlock) | |
2192 | mutex_enter(&dn->dn_dbufs_mtx); | |
8951cb8d | 2193 | avl_remove(&dn->dn_dbufs, db); |
73ad4a9f | 2194 | atomic_dec_32(&dn->dn_dbufs_count); |
572e2857 BB |
2195 | membar_producer(); |
2196 | DB_DNODE_EXIT(db); | |
d3c2ae1c GW |
2197 | if (needlock) |
2198 | mutex_exit(&dn->dn_dbufs_mtx); | |
572e2857 BB |
2199 | /* |
2200 | * Decrementing the dbuf count means that the hold corresponding | |
2201 | * to the removed dbuf is no longer discounted in dnode_move(), | |
2202 | * so the dnode cannot be moved until after we release the hold. | |
2203 | * The membar_producer() ensures visibility of the decremented | |
2204 | * value in dnode_move(), since DB_DNODE_EXIT doesn't actually | |
2205 | * release any lock. | |
2206 | */ | |
34dc7c2f | 2207 | dnode_rele(dn, db); |
572e2857 | 2208 | db->db_dnode_handle = NULL; |
d3c2ae1c GW |
2209 | |
2210 | dbuf_hash_remove(db); | |
572e2857 BB |
2211 | } else { |
2212 | DB_DNODE_EXIT(db); | |
34dc7c2f BB |
2213 | } |
2214 | ||
d3c2ae1c | 2215 | ASSERT(refcount_is_zero(&db->db_holds)); |
34dc7c2f | 2216 | |
d3c2ae1c GW |
2217 | db->db_parent = NULL; |
2218 | ||
2219 | ASSERT(db->db_buf == NULL); | |
2220 | ASSERT(db->db.db_data == NULL); | |
2221 | ASSERT(db->db_hash_next == NULL); | |
2222 | ASSERT(db->db_blkptr == NULL); | |
2223 | ASSERT(db->db_data_pending == NULL); | |
2224 | ASSERT(!multilist_link_active(&db->db_cache_link)); | |
2225 | ||
2226 | kmem_cache_free(dbuf_kmem_cache, db); | |
2227 | arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
34dc7c2f BB |
2228 | |
2229 | /* | |
572e2857 | 2230 | * If this dbuf is referenced from an indirect dbuf, |
34dc7c2f BB |
2231 | * decrement the ref count on the indirect dbuf. |
2232 | */ | |
2233 | if (parent && parent != dndb) | |
2234 | dbuf_rele(parent, db); | |
2235 | } | |
2236 | ||
fcff0f35 PD |
2237 | /* |
2238 | * Note: While bpp will always be updated if the function returns success, | |
2239 | * parentp will not be updated if the dnode does not have dn_dbuf filled in; | |
2240 | * this happens when the dnode is the meta-dnode, or a userused or groupused | |
2241 | * object. | |
2242 | */ | |
bf701a83 BB |
2243 | __attribute__((always_inline)) |
2244 | static inline int | |
34dc7c2f | 2245 | dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, |
fc5bb51f | 2246 | dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh) |
34dc7c2f BB |
2247 | { |
2248 | int nlevels, epbs; | |
2249 | ||
2250 | *parentp = NULL; | |
2251 | *bpp = NULL; | |
2252 | ||
428870ff BB |
2253 | ASSERT(blkid != DMU_BONUS_BLKID); |
2254 | ||
2255 | if (blkid == DMU_SPILL_BLKID) { | |
2256 | mutex_enter(&dn->dn_mtx); | |
2257 | if (dn->dn_have_spill && | |
2258 | (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
50c957f7 | 2259 | *bpp = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
2260 | else |
2261 | *bpp = NULL; | |
2262 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2263 | *parentp = dn->dn_dbuf; | |
2264 | mutex_exit(&dn->dn_mtx); | |
2265 | return (0); | |
2266 | } | |
34dc7c2f | 2267 | |
32d41fb7 PD |
2268 | nlevels = |
2269 | (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; | |
34dc7c2f BB |
2270 | epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; |
2271 | ||
2272 | ASSERT3U(level * epbs, <, 64); | |
2273 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
32d41fb7 PD |
2274 | /* |
2275 | * This assertion shouldn't trip as long as the max indirect block size | |
2276 | * is less than 1M. The reason for this is that up to that point, | |
2277 | * the number of levels required to address an entire object with blocks | |
2278 | * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In | |
2279 | * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 | |
2280 | * (i.e. we can address the entire object), objects will all use at most | |
2281 | * N-1 levels and the assertion won't overflow. However, once epbs is | |
2282 | * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be | |
2283 | * enough to address an entire object, so objects will have 5 levels, | |
2284 | * but then this assertion will overflow. | |
2285 | * | |
2286 | * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we | |
2287 | * need to redo this logic to handle overflows. | |
2288 | */ | |
2289 | ASSERT(level >= nlevels || | |
2290 | ((nlevels - level - 1) * epbs) + | |
2291 | highbit64(dn->dn_phys->dn_nblkptr) <= 64); | |
34dc7c2f | 2292 | if (level >= nlevels || |
32d41fb7 PD |
2293 | blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << |
2294 | ((nlevels - level - 1) * epbs)) || | |
2295 | (fail_sparse && | |
2296 | blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { | |
34dc7c2f | 2297 | /* the buffer has no parent yet */ |
2e528b49 | 2298 | return (SET_ERROR(ENOENT)); |
34dc7c2f BB |
2299 | } else if (level < nlevels-1) { |
2300 | /* this block is referenced from an indirect block */ | |
fc5bb51f BB |
2301 | int err; |
2302 | if (dh == NULL) { | |
fcff0f35 PD |
2303 | err = dbuf_hold_impl(dn, level+1, |
2304 | blkid >> epbs, fail_sparse, FALSE, NULL, parentp); | |
d1d7e268 | 2305 | } else { |
fc5bb51f | 2306 | __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, |
fcff0f35 PD |
2307 | blkid >> epbs, fail_sparse, FALSE, NULL, |
2308 | parentp, dh->dh_depth + 1); | |
fc5bb51f BB |
2309 | err = __dbuf_hold_impl(dh + 1); |
2310 | } | |
34dc7c2f BB |
2311 | if (err) |
2312 | return (err); | |
2313 | err = dbuf_read(*parentp, NULL, | |
2314 | (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); | |
2315 | if (err) { | |
2316 | dbuf_rele(*parentp, NULL); | |
2317 | *parentp = NULL; | |
2318 | return (err); | |
2319 | } | |
2320 | *bpp = ((blkptr_t *)(*parentp)->db.db_data) + | |
2321 | (blkid & ((1ULL << epbs) - 1)); | |
32d41fb7 PD |
2322 | if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) |
2323 | ASSERT(BP_IS_HOLE(*bpp)); | |
34dc7c2f BB |
2324 | return (0); |
2325 | } else { | |
2326 | /* the block is referenced from the dnode */ | |
2327 | ASSERT3U(level, ==, nlevels-1); | |
2328 | ASSERT(dn->dn_phys->dn_nblkptr == 0 || | |
2329 | blkid < dn->dn_phys->dn_nblkptr); | |
2330 | if (dn->dn_dbuf) { | |
2331 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2332 | *parentp = dn->dn_dbuf; | |
2333 | } | |
2334 | *bpp = &dn->dn_phys->dn_blkptr[blkid]; | |
2335 | return (0); | |
2336 | } | |
2337 | } | |
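/*
 * Worked example of the indirection math, assuming a 128K indirect
 * block (dn_indblkshift = 17, so epbs = 17 - SPA_BLKPTRSHIFT = 10,
 * i.e. 1024 blkptrs per indirect block): with nlevels = 3, level = 0
 * and blkid = 1500, the parent is the level-1 dbuf at
 * blkid >> epbs = 1, and *bpp ends up pointing at entry
 * blkid & 1023 = 476 of that indirect block's blkptr array.
 */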
2338 | ||
2339 | static dmu_buf_impl_t * | |
2340 | dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, | |
2341 | dmu_buf_impl_t *parent, blkptr_t *blkptr) | |
2342 | { | |
428870ff | 2343 | objset_t *os = dn->dn_objset; |
34dc7c2f BB |
2344 | dmu_buf_impl_t *db, *odb; |
2345 | ||
2346 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
2347 | ASSERT(dn->dn_type != DMU_OT_NONE); | |
2348 | ||
d3c2ae1c | 2349 | db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); |
34dc7c2f BB |
2350 | |
2351 | db->db_objset = os; | |
2352 | db->db.db_object = dn->dn_object; | |
2353 | db->db_level = level; | |
2354 | db->db_blkid = blkid; | |
2355 | db->db_last_dirty = NULL; | |
2356 | db->db_dirtycnt = 0; | |
572e2857 | 2357 | db->db_dnode_handle = dn->dn_handle; |
34dc7c2f BB |
2358 | db->db_parent = parent; |
2359 | db->db_blkptr = blkptr; | |
2360 | ||
0c66c32d | 2361 | db->db_user = NULL; |
bc4501f7 JG |
2362 | db->db_user_immediate_evict = FALSE; |
2363 | db->db_freed_in_flight = FALSE; | |
2364 | db->db_pending_evict = FALSE; | |
34dc7c2f | 2365 | |
428870ff | 2366 | if (blkid == DMU_BONUS_BLKID) { |
34dc7c2f | 2367 | ASSERT3P(parent, ==, dn->dn_dbuf); |
50c957f7 | 2368 | db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - |
34dc7c2f BB |
2369 | (dn->dn_nblkptr-1) * sizeof (blkptr_t); |
2370 | ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); | |
428870ff | 2371 | db->db.db_offset = DMU_BONUS_BLKID; |
34dc7c2f BB |
2372 | db->db_state = DB_UNCACHED; |
2373 | /* the bonus dbuf is not placed in the hash table */ | |
25458cbe | 2374 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f | 2375 | return (db); |
428870ff BB |
2376 | } else if (blkid == DMU_SPILL_BLKID) { |
2377 | db->db.db_size = (blkptr != NULL) ? | |
2378 | BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; | |
2379 | db->db.db_offset = 0; | |
34dc7c2f BB |
2380 | } else { |
2381 | int blocksize = | |
e8b96c60 | 2382 | db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; |
34dc7c2f BB |
2383 | db->db.db_size = blocksize; |
2384 | db->db.db_offset = db->db_blkid * blocksize; | |
2385 | } | |
2386 | ||
2387 | /* | |
2388 | * Hold the dn_dbufs_mtx while we get the new dbuf | |
2389 | * in the hash table *and* added to the dbufs list. | |
2390 | * This prevents a possible deadlock with someone | |
2391 | * trying to look up this dbuf before it's added to the | |
2392 | * dn_dbufs list. | |
2393 | */ | |
2394 | mutex_enter(&dn->dn_dbufs_mtx); | |
2395 | db->db_state = DB_EVICTING; | |
2396 | if ((odb = dbuf_hash_insert(db)) != NULL) { | |
2397 | /* someone else inserted it first */ | |
d3c2ae1c | 2398 | kmem_cache_free(dbuf_kmem_cache, db); |
34dc7c2f BB |
2399 | mutex_exit(&dn->dn_dbufs_mtx); |
2400 | return (odb); | |
2401 | } | |
8951cb8d | 2402 | avl_add(&dn->dn_dbufs, db); |
b663a23d MA |
2403 | if (db->db_level == 0 && db->db_blkid >= |
2404 | dn->dn_unlisted_l0_blkid) | |
2405 | dn->dn_unlisted_l0_blkid = db->db_blkid + 1; | |
34dc7c2f BB |
2406 | db->db_state = DB_UNCACHED; |
2407 | mutex_exit(&dn->dn_dbufs_mtx); | |
25458cbe | 2408 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f BB |
2409 | |
2410 | if (parent && parent != dn->dn_dbuf) | |
2411 | dbuf_add_ref(parent, db); | |
2412 | ||
2413 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
2414 | refcount_count(&dn->dn_holds) > 0); | |
2415 | (void) refcount_add(&dn->dn_holds, db); | |
73ad4a9f | 2416 | atomic_inc_32(&dn->dn_dbufs_count); |
34dc7c2f BB |
2417 | |
2418 | dprintf_dbuf(db, "db=%p\n", db); | |
2419 | ||
2420 | return (db); | |
2421 | } | |
2422 | ||
fcff0f35 PD |
2423 | typedef struct dbuf_prefetch_arg { |
2424 | spa_t *dpa_spa; /* The spa to issue the prefetch in. */ | |
2425 | zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ | |
2426 | int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ | |
2427 | int dpa_curlevel; /* The current level that we're reading */ | |
d3c2ae1c | 2428 | dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ |
fcff0f35 PD |
2429 | zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ |
2430 | zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ | |
2431 | arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ | |
2432 | } dbuf_prefetch_arg_t; | |
2433 | ||
2434 | /* | |
2435 | * Actually issue the prefetch read for the block given. | |
2436 | */ | |
2437 | static void | |
2438 | dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) | |
2439 | { | |
2440 | arc_flags_t aflags; | |
2441 | if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) | |
2442 | return; | |
2443 | ||
2444 | aflags = dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; | |
2445 | ||
2446 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2447 | ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); | |
2448 | ASSERT(dpa->dpa_zio != NULL); | |
2449 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, | |
2450 | dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2451 | &aflags, &dpa->dpa_zb); | |
2452 | } | |
2453 | ||
2454 | /* | |
2455 | * Called when an indirect block above our prefetch target is read in. This | |
2456 | * will either read in the next indirect block down the tree or issue the actual | |
2457 | * prefetch if the next block down is our target. | |
2458 | */ | |
2459 | static void | |
2460 | dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) | |
2461 | { | |
2462 | dbuf_prefetch_arg_t *dpa = private; | |
2463 | uint64_t nextblkid; | |
2464 | blkptr_t *bp; | |
2465 | ||
2466 | ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); | |
2467 | ASSERT3S(dpa->dpa_curlevel, >, 0); | |
d3c2ae1c GW |
2468 | |
2469 | /* | |
2470 | * The dpa_dnode is only valid if we are called with a NULL | |
2471 | * zio. This indicates that the arc_read() returned without | |
2472 | * first calling zio_read() to issue a physical read. Once | |
2473 | * a physical read is made the dpa_dnode must be invalidated | |
2474 | * as the locks guarding it may have been dropped. If the | |
2475 | * dpa_dnode is still valid, then we want to add it to the dbuf | |
2476 | * cache. To do so, we must hold the dbuf associated with the block | |
2477 | * we just prefetched, read its contents so that we associate it | |
2478 | * with an arc_buf_t, and then release it. | |
2479 | */ | |
fcff0f35 PD |
2480 | if (zio != NULL) { |
2481 | ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); | |
d3c2ae1c GW |
2482 | if (zio->io_flags & ZIO_FLAG_RAW) { |
2483 | ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); | |
2484 | } else { | |
2485 | ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); | |
2486 | } | |
fcff0f35 | 2487 | ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); |
d3c2ae1c GW |
2488 | |
2489 | dpa->dpa_dnode = NULL; | |
2490 | } else if (dpa->dpa_dnode != NULL) { | |
2491 | uint64_t curblkid = dpa->dpa_zb.zb_blkid >> | |
2492 | (dpa->dpa_epbs * (dpa->dpa_curlevel - | |
2493 | dpa->dpa_zb.zb_level)); | |
2494 | dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, | |
2495 | dpa->dpa_curlevel, curblkid, FTAG); | |
2496 | (void) dbuf_read(db, NULL, | |
2497 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); | |
2498 | dbuf_rele(db, FTAG); | |
fcff0f35 PD |
2499 | } |
2500 | ||
2501 | dpa->dpa_curlevel--; | |
2502 | ||
2503 | nextblkid = dpa->dpa_zb.zb_blkid >> | |
2504 | (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); | |
2505 | bp = ((blkptr_t *)abuf->b_data) + | |
2506 | P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); | |
2507 | if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { | |
2508 | kmem_free(dpa, sizeof (*dpa)); | |
2509 | } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { | |
2510 | ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); | |
2511 | dbuf_issue_final_prefetch(dpa, bp); | |
2512 | kmem_free(dpa, sizeof (*dpa)); | |
2513 | } else { | |
2514 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2515 | zbookmark_phys_t zb; | |
2516 | ||
2517 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2518 | ||
2519 | SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, | |
2520 | dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); | |
2521 | ||
2522 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2523 | bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, | |
2524 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2525 | &iter_aflags, &zb); | |
2526 | } | |
d3c2ae1c GW |
2527 | |
2528 | arc_buf_destroy(abuf, private); | |
fcff0f35 PD |
2529 | } |
2530 | ||
2531 | /* | |
2532 | * Issue prefetch reads for the given block on the given level. If the indirect | |
2533 | * blocks above that block are not in memory, we will read them in | |
2534 | * asynchronously. As a result, this call never blocks waiting for a read to | |
2535 | * complete. | |
2536 | */ | |
34dc7c2f | 2537 | void |
fcff0f35 PD |
2538 | dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, |
2539 | arc_flags_t aflags) | |
34dc7c2f | 2540 | { |
fcff0f35 PD |
2541 | blkptr_t bp; |
2542 | int epbs, nlevels, curlevel; | |
2543 | uint64_t curblkid; | |
2544 | dmu_buf_impl_t *db; | |
2545 | zio_t *pio; | |
2546 | dbuf_prefetch_arg_t *dpa; | |
2547 | dsl_dataset_t *ds; | |
34dc7c2f | 2548 | |
428870ff | 2549 | ASSERT(blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2550 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
2551 | ||
7f60329a MA |
2552 | if (blkid > dn->dn_maxblkid) |
2553 | return; | |
2554 | ||
34dc7c2f BB |
2555 | if (dnode_block_freed(dn, blkid)) |
2556 | return; | |
2557 | ||
fcff0f35 PD |
2558 | /* |
2559 | * This dnode hasn't been written to disk yet, so there's nothing to | |
2560 | * prefetch. | |
2561 | */ | |
2562 | nlevels = dn->dn_phys->dn_nlevels; | |
2563 | if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) | |
2564 | return; | |
2565 | ||
2566 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2567 | if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) | |
2568 | return; | |
2569 | ||
2570 | db = dbuf_find(dn->dn_objset, dn->dn_object, | |
2571 | level, blkid); | |
2572 | if (db != NULL) { | |
2573 | mutex_exit(&db->db_mtx); | |
572e2857 | 2574 | /* |
fcff0f35 PD |
2575 | * This dbuf already exists. It is either CACHED, or |
2576 | * (we assume) about to be read or filled. | |
572e2857 | 2577 | */ |
572e2857 | 2578 | return; |
34dc7c2f BB |
2579 | } |
2580 | ||
fcff0f35 PD |
2581 | /* |
2582 | * Find the closest ancestor (indirect block) of the target block | |
2583 | * that is present in the cache. In this indirect block, we will | |
2584 | * find the bp that is at curlevel, curblkid. | |
2585 | */ | |
2586 | curlevel = level; | |
2587 | curblkid = blkid; | |
2588 | while (curlevel < nlevels - 1) { | |
2589 | int parent_level = curlevel + 1; | |
2590 | uint64_t parent_blkid = curblkid >> epbs; | |
2591 | dmu_buf_impl_t *db; | |
2592 | ||
2593 | if (dbuf_hold_impl(dn, parent_level, parent_blkid, | |
2594 | FALSE, TRUE, FTAG, &db) == 0) { | |
2595 | blkptr_t *bpp = db->db_buf->b_data; | |
2596 | bp = bpp[P2PHASE(curblkid, 1 << epbs)]; | |
2597 | dbuf_rele(db, FTAG); | |
2598 | break; | |
2599 | } | |
428870ff | 2600 | |
fcff0f35 PD |
2601 | curlevel = parent_level; |
2602 | curblkid = parent_blkid; | |
2603 | } | |
34dc7c2f | 2604 | |
fcff0f35 PD |
2605 | if (curlevel == nlevels - 1) { |
2606 | /* No cached indirect blocks found. */ | |
2607 | ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); | |
2608 | bp = dn->dn_phys->dn_blkptr[curblkid]; | |
34dc7c2f | 2609 | } |
fcff0f35 PD |
2610 | if (BP_IS_HOLE(&bp)) |
2611 | return; | |
2612 | ||
2613 | ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); | |
2614 | ||
2615 | pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, | |
2616 | ZIO_FLAG_CANFAIL); | |
2617 | ||
2618 | dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); | |
2619 | ds = dn->dn_objset->os_dsl_dataset; | |
2620 | SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2621 | dn->dn_object, level, blkid); | |
2622 | dpa->dpa_curlevel = curlevel; | |
2623 | dpa->dpa_prio = prio; | |
2624 | dpa->dpa_aflags = aflags; | |
2625 | dpa->dpa_spa = dn->dn_objset->os_spa; | |
d3c2ae1c | 2626 | dpa->dpa_dnode = dn; |
fcff0f35 PD |
2627 | dpa->dpa_epbs = epbs; |
2628 | dpa->dpa_zio = pio; | |
2629 | ||
2630 | /* | |
2631 | * If we have the indirect just above us, no need to do the asynchronous | |
2632 | * prefetch chain; we'll just run the last step ourselves. If we're at | |
2633 | * a higher level, though, we want to issue the prefetches for all the | |
2634 | * indirect blocks asynchronously, so we can go on with whatever we were | |
2635 | * doing. | |
2636 | */ | |
2637 | if (curlevel == level) { | |
2638 | ASSERT3U(curblkid, ==, blkid); | |
2639 | dbuf_issue_final_prefetch(dpa, &bp); | |
2640 | kmem_free(dpa, sizeof (*dpa)); | |
2641 | } else { | |
2642 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2643 | zbookmark_phys_t zb; | |
2644 | ||
2645 | SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2646 | dn->dn_object, curlevel, curblkid); | |
2647 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2648 | &bp, dbuf_prefetch_indirect_done, dpa, prio, | |
2649 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2650 | &iter_aflags, &zb); | |
2651 | } | |
2652 | /* | |
2653 | * We use pio here instead of dpa_zio since it's possible that | |
2654 | * dpa may have already been freed. | |
2655 | */ | |
2656 | zio_nowait(pio); | |
34dc7c2f BB |
2657 | } |
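/*
 * Usage sketch (hypothetical caller): to prefetch level-0 block "blkid"
 * of a held dnode at asynchronous read priority with no extra arc flags:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * Per the ASSERT above, dn_struct_rwlock must be held across the call.
 */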
2658 | ||
d1d7e268 | 2659 | #define DBUF_HOLD_IMPL_MAX_DEPTH 20 |
fc5bb51f | 2660 | |
34dc7c2f BB |
2661 | /* |
2662 | * Returns with db_holds incremented, and db_mtx not held. | |
2663 | * Note: dn_struct_rwlock must be held. | |
2664 | */ | |
fc5bb51f BB |
2665 | static int |
2666 | __dbuf_hold_impl(struct dbuf_hold_impl_data *dh) | |
34dc7c2f | 2667 | { |
fc5bb51f BB |
2668 | ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH); |
2669 | dh->dh_parent = NULL; | |
34dc7c2f | 2670 | |
fc5bb51f BB |
2671 | ASSERT(dh->dh_blkid != DMU_BONUS_BLKID); |
2672 | ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock)); | |
2673 | ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level); | |
34dc7c2f | 2674 | |
fc5bb51f | 2675 | *(dh->dh_dbp) = NULL; |
d3c2ae1c | 2676 | |
34dc7c2f | 2677 | /* dbuf_find() returns with db_mtx held */ |
6ebebace JG |
2678 | dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object, |
2679 | dh->dh_level, dh->dh_blkid); | |
fc5bb51f BB |
2680 | |
2681 | if (dh->dh_db == NULL) { | |
2682 | dh->dh_bp = NULL; | |
2683 | ||
fcff0f35 PD |
2684 | if (dh->dh_fail_uncached) |
2685 | return (SET_ERROR(ENOENT)); | |
2686 | ||
fc5bb51f BB |
2687 | ASSERT3P(dh->dh_parent, ==, NULL); |
2688 | dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
2689 | dh->dh_fail_sparse, &dh->dh_parent, | |
2690 | &dh->dh_bp, dh); | |
2691 | if (dh->dh_fail_sparse) { | |
d1d7e268 MK |
2692 | if (dh->dh_err == 0 && |
2693 | dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) | |
2e528b49 | 2694 | dh->dh_err = SET_ERROR(ENOENT); |
fc5bb51f BB |
2695 | if (dh->dh_err) { |
2696 | if (dh->dh_parent) | |
2697 | dbuf_rele(dh->dh_parent, NULL); | |
2698 | return (dh->dh_err); | |
34dc7c2f BB |
2699 | } |
2700 | } | |
fc5bb51f BB |
2701 | if (dh->dh_err && dh->dh_err != ENOENT) |
2702 | return (dh->dh_err); | |
2703 | dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
2704 | dh->dh_parent, dh->dh_bp); | |
34dc7c2f BB |
2705 | } |
2706 | ||
fcff0f35 PD |
2707 | if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) { |
2708 | mutex_exit(&dh->dh_db->db_mtx); | |
2709 | return (SET_ERROR(ENOENT)); | |
2710 | } | |
2711 | ||
d3c2ae1c | 2712 | if (dh->dh_db->db_buf != NULL) |
fc5bb51f | 2713 | ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data); |
34dc7c2f | 2714 | |
fc5bb51f | 2715 | ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf)); |
34dc7c2f BB |
2716 | |
2717 | /* | |
2718 | * If this buffer is currently syncing out, and we are | |
2719 | * still referencing it from db_data, we need to make a copy | |
2720 | * of it in case we decide we want to dirty it again in this txg. | |
2721 | */ | |
fc5bb51f BB |
2722 | if (dh->dh_db->db_level == 0 && |
2723 | dh->dh_db->db_blkid != DMU_BONUS_BLKID && | |
2724 | dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT && | |
2725 | dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) { | |
2726 | dh->dh_dr = dh->dh_db->db_data_pending; | |
2727 | ||
2728 | if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) { | |
2729 | dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db); | |
2730 | ||
2731 | dbuf_set_data(dh->dh_db, | |
d3c2ae1c | 2732 | arc_alloc_buf(dh->dh_dn->dn_objset->os_spa, |
2aa34383 | 2733 | dh->dh_db, dh->dh_type, dh->dh_db->db.db_size)); |
fc5bb51f BB |
2734 | bcopy(dh->dh_dr->dt.dl.dr_data->b_data, |
2735 | dh->dh_db->db.db_data, dh->dh_db->db.db_size); | |
34dc7c2f BB |
2736 | } |
2737 | } | |
2738 | ||
d3c2ae1c GW |
2739 | if (multilist_link_active(&dh->dh_db->db_cache_link)) { |
2740 | ASSERT(refcount_is_zero(&dh->dh_db->db_holds)); | |
2741 | multilist_remove(&dbuf_cache, dh->dh_db); | |
2742 | (void) refcount_remove_many(&dbuf_cache_size, | |
2743 | dh->dh_db->db.db_size, dh->dh_db); | |
2744 | } | |
fc5bb51f | 2745 | (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag); |
fc5bb51f BB |
2746 | DBUF_VERIFY(dh->dh_db); |
2747 | mutex_exit(&dh->dh_db->db_mtx); | |
34dc7c2f BB |
2748 | |
2749 | /* NOTE: we can't rele the parent until after we drop the db_mtx */ | |
fc5bb51f BB |
2750 | if (dh->dh_parent) |
2751 | dbuf_rele(dh->dh_parent, NULL); | |
34dc7c2f | 2752 | |
fc5bb51f BB |
2753 | ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn); |
2754 | ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid); | |
2755 | ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level); | |
2756 | *(dh->dh_dbp) = dh->dh_db; | |
34dc7c2f BB |
2757 | |
2758 | return (0); | |
2759 | } | |
2760 | ||
fc5bb51f BB |
2761 | /* |
2762 | * The following code preserves the recursive function dbuf_hold_impl() | |
2763 | * but moves the local variables AND function arguments to the heap to | |
2764 | * minimize the stack frame size. Enough space is initially allocated | |
2765 | * on the heap (via kmem_alloc()) for 20 levels of recursion. | |
2766 | */ | |
2767 | int | |
fcff0f35 PD |
2768 | dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, |
2769 | boolean_t fail_sparse, boolean_t fail_uncached, | |
fc5bb51f BB |
2770 | void *tag, dmu_buf_impl_t **dbp) |
2771 | { | |
2772 | struct dbuf_hold_impl_data *dh; | |
2773 | int error; | |
2774 | ||
d9eea113 | 2775 | dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) * |
79c76d5b | 2776 | DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP); |
fcff0f35 PD |
2777 | __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, |
2778 | fail_uncached, tag, dbp, 0); | |
fc5bb51f BB |
2779 | |
2780 | error = __dbuf_hold_impl(dh); | |
2781 | ||
d1d7e268 | 2782 | kmem_free(dh, sizeof (struct dbuf_hold_impl_data) * |
fc5bb51f BB |
2783 | DBUF_HOLD_IMPL_MAX_DEPTH); |
2784 | ||
2785 | return (error); | |
2786 | } | |
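/*
 * Sizing note: the kmem_alloc() above reserves dh[0..19], one
 * dbuf_hold_impl_data per level of the former recursion; dbuf_findbp()
 * advances to dh + 1 when it must hold the parent dbuf, and the
 * ASSERT3S on dh_depth in __dbuf_hold_impl() guarantees the walk never
 * exceeds DBUF_HOLD_IMPL_MAX_DEPTH (20) levels.
 */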
2787 | ||
2788 | static void | |
2789 | __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh, | |
fcff0f35 PD |
2790 | dnode_t *dn, uint8_t level, uint64_t blkid, |
2791 | boolean_t fail_sparse, boolean_t fail_uncached, | |
2792 | void *tag, dmu_buf_impl_t **dbp, int depth) | |
fc5bb51f BB |
2793 | { |
2794 | dh->dh_dn = dn; | |
2795 | dh->dh_level = level; | |
2796 | dh->dh_blkid = blkid; | |
fcff0f35 | 2797 | |
fc5bb51f | 2798 | dh->dh_fail_sparse = fail_sparse; |
fcff0f35 PD |
2799 | dh->dh_fail_uncached = fail_uncached; |
2800 | ||
fc5bb51f BB |
2801 | dh->dh_tag = tag; |
2802 | dh->dh_dbp = dbp; | |
d9eea113 MA |
2803 | |
2804 | dh->dh_db = NULL; | |
2805 | dh->dh_parent = NULL; | |
2806 | dh->dh_bp = NULL; | |
2807 | dh->dh_err = 0; | |
2808 | dh->dh_dr = NULL; | |
2809 | dh->dh_type = 0; | |
2810 | ||
fc5bb51f BB |
2811 | dh->dh_depth = depth; |
2812 | } | |
2813 | ||
34dc7c2f BB |
2814 | dmu_buf_impl_t * |
2815 | dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) | |
2816 | { | |
fcff0f35 | 2817 | return (dbuf_hold_level(dn, 0, blkid, tag)); |
34dc7c2f BB |
2818 | } |
2819 | ||
2820 | dmu_buf_impl_t * | |
2821 | dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) | |
2822 | { | |
2823 | dmu_buf_impl_t *db; | |
fcff0f35 | 2824 | int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); |
34dc7c2f BB |
2825 | return (err ? NULL : db); |
2826 | } | |
2827 | ||
2828 | void | |
2829 | dbuf_create_bonus(dnode_t *dn) | |
2830 | { | |
2831 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
2832 | ||
2833 | ASSERT(dn->dn_bonus == NULL); | |
428870ff BB |
2834 | dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); |
2835 | } | |
2836 | ||
2837 | int | |
2838 | dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) | |
2839 | { | |
2840 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
572e2857 BB |
2841 | dnode_t *dn; |
2842 | ||
428870ff | 2843 | if (db->db_blkid != DMU_SPILL_BLKID) |
2e528b49 | 2844 | return (SET_ERROR(ENOTSUP)); |
428870ff BB |
2845 | if (blksz == 0) |
2846 | blksz = SPA_MINBLOCKSIZE; | |
f1512ee6 MA |
2847 | ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); |
2848 | blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); | |
428870ff | 2849 | |
572e2857 BB |
2850 | DB_DNODE_ENTER(db); |
2851 | dn = DB_DNODE(db); | |
2852 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
428870ff | 2853 | dbuf_new_size(db, blksz, tx); |
572e2857 BB |
2854 | rw_exit(&dn->dn_struct_rwlock); |
2855 | DB_DNODE_EXIT(db); | |
428870ff BB |
2856 | |
2857 | return (0); | |
2858 | } | |
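The size handling above defaults a zero request to SPA_MINBLOCKSIZE and rounds everything else up to the next multiple of it. A stand-alone sketch of that arithmetic, assuming the usual definitions (SPA_MINBLOCKSIZE of 512 and the power-of-two P2ROUNDUP macro); not the kernel code itself:

```c
#include <stdio.h>
#include <stdint.h>

#define SPA_MINBLOCKSIZE	512ULL
/* Round x up to a power-of-two alignment (assumed P2ROUNDUP form). */
#define P2ROUNDUP(x, align)	(-(-(uint64_t)(x) & -(uint64_t)(align)))

int
main(void)
{
	uint64_t requested[] = { 0, 1, 512, 513, 4000 };

	for (int i = 0; i < 5; i++) {
		uint64_t blksz = requested[i];

		if (blksz == 0)		/* same default as above */
			blksz = SPA_MINBLOCKSIZE;
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
		printf("%llu -> %llu\n",
		    (unsigned long long)requested[i],
		    (unsigned long long)blksz);
	}
	/* prints 0->512, 1->512, 512->512, 513->1024, 4000->4096 */
	return (0);
}
```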
2859 | ||
2860 | void | |
2861 | dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) | |
2862 | { | |
2863 | dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); | |
34dc7c2f BB |
2864 | } |
2865 | ||
2866 | #pragma weak dmu_buf_add_ref = dbuf_add_ref | |
2867 | void | |
2868 | dbuf_add_ref(dmu_buf_impl_t *db, void *tag) | |
2869 | { | |
d3c2ae1c GW |
2870 | int64_t holds = refcount_add(&db->db_holds, tag); |
2871 | VERIFY3S(holds, >, 1); | |
34dc7c2f BB |
2872 | } |
2873 | ||
6ebebace JG |
2874 | #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref |
2875 | boolean_t | |
2876 | dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, | |
2877 | void *tag) | |
2878 | { | |
2879 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2880 | dmu_buf_impl_t *found_db; | |
2881 | boolean_t result = B_FALSE; | |
2882 | ||
d617648c | 2883 | if (blkid == DMU_BONUS_BLKID) |
6ebebace JG |
2884 | found_db = dbuf_find_bonus(os, obj); |
2885 | else | |
2886 | found_db = dbuf_find(os, obj, 0, blkid); | |
2887 | ||
2888 | if (found_db != NULL) { | |
2889 | if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { | |
2890 | (void) refcount_add(&db->db_holds, tag); | |
2891 | result = B_TRUE; | |
2892 | } | |
d617648c | 2893 | mutex_exit(&found_db->db_mtx); |
6ebebace JG |
2894 | } |
2895 | return (result); | |
2896 | } | |
2897 | ||
572e2857 BB |
2898 | /* |
2899 | * If you call dbuf_rele() you had better not be referencing the dnode handle | |
2900 | * unless you have some other direct or indirect hold on the dnode. (An indirect | |
2901 | * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) | |
2902 | * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the | |
2903 | * dnode's parent dbuf evicting its dnode handles. | |
2904 | */ | |
34dc7c2f BB |
2905 | void |
2906 | dbuf_rele(dmu_buf_impl_t *db, void *tag) | |
428870ff BB |
2907 | { |
2908 | mutex_enter(&db->db_mtx); | |
2909 | dbuf_rele_and_unlock(db, tag); | |
2910 | } | |
2911 | ||
b0bc7a84 MG |
2912 | void |
2913 | dmu_buf_rele(dmu_buf_t *db, void *tag) | |
2914 | { | |
2915 | dbuf_rele((dmu_buf_impl_t *)db, tag); | |
2916 | } | |
2917 | ||
428870ff BB |
2918 | /* |
2919 | * dbuf_rele() for an already-locked dbuf. This is necessary to allow | |
2920 | * db_dirtycnt and db_holds to be updated atomically. | |
2921 | */ | |
2922 | void | |
2923 | dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) | |
34dc7c2f BB |
2924 | { |
2925 | int64_t holds; | |
2926 | ||
428870ff | 2927 | ASSERT(MUTEX_HELD(&db->db_mtx)); |
34dc7c2f BB |
2928 | DBUF_VERIFY(db); |
2929 | ||
572e2857 BB |
2930 | /* |
2931 | * Remove the reference to the dbuf before removing its hold on the | |
2932 | * dnode so we can guarantee in dnode_move() that a referenced bonus | |
2933 | * buffer has a corresponding dnode hold. | |
2934 | */ | |
34dc7c2f BB |
2935 | holds = refcount_remove(&db->db_holds, tag); |
2936 | ASSERT(holds >= 0); | |
2937 | ||
2938 | /* | |
2939 | * We can't freeze indirects if there is a possibility that they | |
2940 | * may be modified in the current syncing context. | |
2941 | */ | |
d3c2ae1c GW |
2942 | if (db->db_buf != NULL && |
2943 | holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { | |
34dc7c2f | 2944 | arc_buf_freeze(db->db_buf); |
d3c2ae1c | 2945 | } |
34dc7c2f BB |
2946 | |
2947 | if (holds == db->db_dirtycnt && | |
bc4501f7 | 2948 | db->db_level == 0 && db->db_user_immediate_evict) |
34dc7c2f BB |
2949 | dbuf_evict_user(db); |
2950 | ||
2951 | if (holds == 0) { | |
428870ff | 2952 | if (db->db_blkid == DMU_BONUS_BLKID) { |
4c7b7eed | 2953 | dnode_t *dn; |
bc4501f7 | 2954 | boolean_t evict_dbuf = db->db_pending_evict; |
572e2857 BB |
2955 | |
2956 | /* | |
4c7b7eed JG |
2957 | * If the dnode moves here, we cannot cross this |
2958 | * barrier until the move completes. | |
572e2857 BB |
2959 | */ |
2960 | DB_DNODE_ENTER(db); | |
4c7b7eed JG |
2961 | |
2962 | dn = DB_DNODE(db); | |
2963 | atomic_dec_32(&dn->dn_dbufs_count); | |
2964 | ||
2965 | /* | |
2966 | * Decrementing the dbuf count means that the bonus | |
2967 | * buffer's dnode hold is no longer discounted in | |
2968 | * dnode_move(). The dnode cannot move until after | |
bc4501f7 | 2969 | * the dnode_rele() below. |
4c7b7eed | 2970 | */ |
572e2857 | 2971 | DB_DNODE_EXIT(db); |
4c7b7eed JG |
2972 | |
2973 | /* | |
2974 | * Do not reference db after its lock is dropped. | |
2975 | * Another thread may evict it. | |
2976 | */ | |
2977 | mutex_exit(&db->db_mtx); | |
2978 | ||
bc4501f7 | 2979 | if (evict_dbuf) |
4c7b7eed | 2980 | dnode_evict_bonus(dn); |
bc4501f7 JG |
2981 | |
2982 | dnode_rele(dn, db); | |
34dc7c2f BB |
2983 | } else if (db->db_buf == NULL) { |
2984 | /* | |
2985 | * This is a special case: we never associated this | |
2986 | * dbuf with any data allocated from the ARC. | |
2987 | */ | |
b128c09f BB |
2988 | ASSERT(db->db_state == DB_UNCACHED || |
2989 | db->db_state == DB_NOFILL); | |
d3c2ae1c | 2990 | dbuf_destroy(db); |
34dc7c2f | 2991 | } else if (arc_released(db->db_buf)) { |
34dc7c2f BB |
2992 | /* |
2993 | * This dbuf has anonymous data associated with it. | |
2994 | */ | |
d3c2ae1c | 2995 | dbuf_destroy(db); |
34dc7c2f | 2996 | } else { |
d3c2ae1c GW |
2997 | boolean_t do_arc_evict = B_FALSE; |
2998 | blkptr_t bp; | |
2999 | spa_t *spa = dmu_objset_spa(db->db_objset); | |
3000 | ||
3001 | if (!DBUF_IS_CACHEABLE(db) && | |
3002 | db->db_blkptr != NULL && | |
3003 | !BP_IS_HOLE(db->db_blkptr) && | |
3004 | !BP_IS_EMBEDDED(db->db_blkptr)) { | |
3005 | do_arc_evict = B_TRUE; | |
3006 | bp = *db->db_blkptr; | |
3007 | } | |
1eb5bfa3 | 3008 | |
d3c2ae1c GW |
3009 | if (!DBUF_IS_CACHEABLE(db) || |
3010 | db->db_pending_evict) { | |
3011 | dbuf_destroy(db); | |
3012 | } else if (!multilist_link_active(&db->db_cache_link)) { | |
3013 | multilist_insert(&dbuf_cache, db); | |
3014 | (void) refcount_add_many(&dbuf_cache_size, | |
3015 | db->db.db_size, db); | |
b128c09f | 3016 | mutex_exit(&db->db_mtx); |
d3c2ae1c GW |
3017 | |
3018 | dbuf_evict_notify(); | |
bd089c54 | 3019 | } |
d3c2ae1c GW |
3020 | |
3021 | if (do_arc_evict) | |
3022 | arc_freed(spa, &bp); | |
34dc7c2f BB |
3023 | } |
3024 | } else { | |
3025 | mutex_exit(&db->db_mtx); | |
3026 | } | |
d3c2ae1c | 3027 | |
34dc7c2f BB |
3028 | } |
3029 | ||
3030 | #pragma weak dmu_buf_refcount = dbuf_refcount | |
3031 | uint64_t | |
3032 | dbuf_refcount(dmu_buf_impl_t *db) | |
3033 | { | |
3034 | return (refcount_count(&db->db_holds)); | |
3035 | } | |
3036 | ||
3037 | void * | |
0c66c32d JG |
3038 | dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, |
3039 | dmu_buf_user_t *new_user) | |
34dc7c2f | 3040 | { |
0c66c32d JG |
3041 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
3042 | ||
3043 | mutex_enter(&db->db_mtx); | |
3044 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3045 | if (db->db_user == old_user) | |
3046 | db->db_user = new_user; | |
3047 | else | |
3048 | old_user = db->db_user; | |
3049 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3050 | mutex_exit(&db->db_mtx); | |
3051 | ||
3052 | return (old_user); | |
34dc7c2f BB |
3053 | } |
3054 | ||
3055 | void * | |
0c66c32d | 3056 | dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f | 3057 | { |
0c66c32d | 3058 | return (dmu_buf_replace_user(db_fake, NULL, user)); |
34dc7c2f BB |
3059 | } |
3060 | ||
3061 | void * | |
0c66c32d | 3062 | dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f BB |
3063 | { |
3064 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 3065 | |
bc4501f7 | 3066 | db->db_user_immediate_evict = TRUE; |
0c66c32d JG |
3067 | return (dmu_buf_set_user(db_fake, user)); |
3068 | } | |
34dc7c2f | 3069 | |
0c66c32d JG |
3070 | void * |
3071 | dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
3072 | { | |
3073 | return (dmu_buf_replace_user(db_fake, user, NULL)); | |
34dc7c2f BB |
3074 | } |
3075 | ||
3076 | void * | |
3077 | dmu_buf_get_user(dmu_buf_t *db_fake) | |
3078 | { | |
3079 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 3080 | |
0c66c32d JG |
3081 | dbuf_verify_user(db, DBVU_NOT_EVICTING); |
3082 | return (db->db_user); | |
3083 | } | |
3084 | ||
3085 | void | |
3086 | dmu_buf_user_evict_wait() | |
3087 | { | |
3088 | taskq_wait(dbu_evict_taskq); | |
34dc7c2f BB |
3089 | } |
3090 | ||
9babb374 BB |
3091 | boolean_t |
3092 | dmu_buf_freeable(dmu_buf_t *dbuf) | |
3093 | { | |
3094 | boolean_t res = B_FALSE; | |
3095 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
3096 | ||
3097 | if (db->db_blkptr) | |
3098 | res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset, | |
428870ff | 3099 | db->db_blkptr, db->db_blkptr->blk_birth); |
9babb374 BB |
3100 | |
3101 | return (res); | |
3102 | } | |
3103 | ||
03c6040b GW |
3104 | blkptr_t * |
3105 | dmu_buf_get_blkptr(dmu_buf_t *db) | |
3106 | { | |
3107 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3108 | return (dbi->db_blkptr); | |
3109 | } | |
3110 | ||
8bea9815 MA |
3111 | objset_t * |
3112 | dmu_buf_get_objset(dmu_buf_t *db) | |
3113 | { | |
3114 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3115 | return (dbi->db_objset); | |
3116 | } | |
3117 | ||
2bce8049 MA |
3118 | dnode_t * |
3119 | dmu_buf_dnode_enter(dmu_buf_t *db) | |
3120 | { | |
3121 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3122 | DB_DNODE_ENTER(dbi); | |
3123 | return (DB_DNODE(dbi)); | |
3124 | } | |
3125 | ||
3126 | void | |
3127 | dmu_buf_dnode_exit(dmu_buf_t *db) | |
3128 | { | |
3129 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3130 | DB_DNODE_EXIT(dbi); | |
3131 | } | |
3132 | ||
34dc7c2f BB |
3133 | static void |
3134 | dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) | |
3135 | { | |
3136 | /* ASSERT(dmu_tx_is_syncing(tx)) */ | |
3137 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
3138 | ||
3139 | if (db->db_blkptr != NULL) | |
3140 | return; | |
3141 | ||
428870ff | 3142 | if (db->db_blkid == DMU_SPILL_BLKID) { |
50c957f7 | 3143 | db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
3144 | BP_ZERO(db->db_blkptr); |
3145 | return; | |
3146 | } | |
34dc7c2f BB |
3147 | if (db->db_level == dn->dn_phys->dn_nlevels-1) { |
3148 | /* | |
3149 | * This buffer was allocated at a time when there were | |
3150 | * no available blkptrs from the dnode, or it was | |
3151 | * inappropriate to hook it in (i.e., nlevels mismatch). | |
3152 | */ | |
3153 | ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); | |
3154 | ASSERT(db->db_parent == NULL); | |
3155 | db->db_parent = dn->dn_dbuf; | |
3156 | db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; | |
3157 | DBUF_VERIFY(db); | |
3158 | } else { | |
3159 | dmu_buf_impl_t *parent = db->db_parent; | |
3160 | int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3161 | ||
3162 | ASSERT(dn->dn_phys->dn_nlevels > 1); | |
3163 | if (parent == NULL) { | |
3164 | mutex_exit(&db->db_mtx); | |
3165 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
fcff0f35 PD |
3166 | parent = dbuf_hold_level(dn, db->db_level + 1, |
3167 | db->db_blkid >> epbs, db); | |
34dc7c2f BB |
3168 | rw_exit(&dn->dn_struct_rwlock); |
3169 | mutex_enter(&db->db_mtx); | |
3170 | db->db_parent = parent; | |
3171 | } | |
3172 | db->db_blkptr = (blkptr_t *)parent->db.db_data + | |
3173 | (db->db_blkid & ((1ULL << epbs) - 1)); | |
3174 | DBUF_VERIFY(db); | |
3175 | } | |
3176 | } | |
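The parent lookup above turns on epbs, the log2 number of block pointers per indirect block: the parent's blkid is the child's blkid shifted right by epbs, and the child's slot in the parent is the low epbs bits. A stand-alone illustration with assumed sizes (SPA_BLKPTRSHIFT of 7 for a 128-byte blkptr_t and a 128K indirect block):

```c
#include <stdio.h>
#include <stdint.h>

#define SPA_BLKPTRSHIFT	7	/* 128-byte block pointers (assumed) */

int
main(void)
{
	int indblkshift = 17;	/* 128K indirect blocks (assumed) */
	int epbs = indblkshift - SPA_BLKPTRSHIFT;	/* 10 -> 1024 bps */
	uint64_t blkid = 123456;

	uint64_t parent_blkid = blkid >> epbs;
	uint64_t slot = blkid & ((1ULL << epbs) - 1);

	/* child 123456 -> parent 120, slot 576 of 1024 */
	printf("child %llu -> parent %llu, slot %llu of %llu\n",
	    (unsigned long long)blkid,
	    (unsigned long long)parent_blkid,
	    (unsigned long long)slot,
	    (unsigned long long)(1ULL << epbs));
	return (0);
}
```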
3177 | ||
d1d7e268 MK |
3178 | /* |
3179 | * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it | |
60948de1 BB |
3180 | * is critical that we not allow the compiler to inline this function into |
3181 | * dbuf_sync_list() thereby drastically bloating the stack usage. | |
3182 | */ | |
3183 | noinline static void | |
34dc7c2f BB |
3184 | dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
3185 | { | |
3186 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 | 3187 | dnode_t *dn; |
34dc7c2f BB |
3188 | zio_t *zio; |
3189 | ||
3190 | ASSERT(dmu_tx_is_syncing(tx)); | |
3191 | ||
3192 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3193 | ||
3194 | mutex_enter(&db->db_mtx); | |
3195 | ||
3196 | ASSERT(db->db_level > 0); | |
3197 | DBUF_VERIFY(db); | |
3198 | ||
e49f1e20 | 3199 | /* Read the block if it hasn't been read yet. */ |
34dc7c2f BB |
3200 | if (db->db_buf == NULL) { |
3201 | mutex_exit(&db->db_mtx); | |
3202 | (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); | |
3203 | mutex_enter(&db->db_mtx); | |
3204 | } | |
3205 | ASSERT3U(db->db_state, ==, DB_CACHED); | |
34dc7c2f BB |
3206 | ASSERT(db->db_buf != NULL); |
3207 | ||
572e2857 BB |
3208 | DB_DNODE_ENTER(db); |
3209 | dn = DB_DNODE(db); | |
e49f1e20 | 3210 | /* Indirect block size must match what the dnode thinks it is. */ |
572e2857 | 3211 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
34dc7c2f | 3212 | dbuf_check_blkptr(dn, db); |
572e2857 | 3213 | DB_DNODE_EXIT(db); |
34dc7c2f | 3214 | |
e49f1e20 | 3215 | /* Provide the pending dirty record to child dbufs */ |
34dc7c2f BB |
3216 | db->db_data_pending = dr; |
3217 | ||
34dc7c2f | 3218 | mutex_exit(&db->db_mtx); |
b128c09f | 3219 | dbuf_write(dr, db->db_buf, tx); |
34dc7c2f BB |
3220 | |
3221 | zio = dr->dr_zio; | |
3222 | mutex_enter(&dr->dt.di.dr_mtx); | |
4bda3bd0 | 3223 | dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); |
34dc7c2f BB |
3224 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
3225 | mutex_exit(&dr->dt.di.dr_mtx); | |
3226 | zio_nowait(zio); | |
3227 | } | |
3228 | ||
d1d7e268 MK |
3229 | /* |
3230 | * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is | |
60948de1 BB |
3231 | * critical that we not allow the compiler to inline this function into | |
3232 | * dbuf_sync_list() thereby drastically bloating the stack usage. | |
3233 | */ | |
3234 | noinline static void | |
34dc7c2f BB |
3235 | dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
3236 | { | |
3237 | arc_buf_t **datap = &dr->dt.dl.dr_data; | |
3238 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 BB |
3239 | dnode_t *dn; |
3240 | objset_t *os; | |
34dc7c2f | 3241 | uint64_t txg = tx->tx_txg; |
34dc7c2f BB |
3242 | |
3243 | ASSERT(dmu_tx_is_syncing(tx)); | |
3244 | ||
3245 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3246 | ||
3247 | mutex_enter(&db->db_mtx); | |
3248 | /* | |
3249 | * To be synced, we must be dirtied. But we | |
3250 | * might have been freed after the dirty. | |
3251 | */ | |
3252 | if (db->db_state == DB_UNCACHED) { | |
3253 | /* This buffer has been freed since it was dirtied */ | |
3254 | ASSERT(db->db.db_data == NULL); | |
3255 | } else if (db->db_state == DB_FILL) { | |
3256 | /* This buffer was freed and is now being re-filled */ | |
3257 | ASSERT(db->db.db_data != dr->dt.dl.dr_data); | |
3258 | } else { | |
b128c09f | 3259 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); |
34dc7c2f BB |
3260 | } |
3261 | DBUF_VERIFY(db); | |
3262 | ||
572e2857 BB |
3263 | DB_DNODE_ENTER(db); |
3264 | dn = DB_DNODE(db); | |
3265 | ||
428870ff BB |
3266 | if (db->db_blkid == DMU_SPILL_BLKID) { |
3267 | mutex_enter(&dn->dn_mtx); | |
81edd3e8 P |
3268 | if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { |
3269 | /* | |
3270 | * In the previous transaction group, the bonus buffer | |
3271 | * was entirely used to store the attributes for the | |
3272 | * dnode which overrode the dn_spill field. However, | |
3273 | * when adding more attributes to the file a spill | |
3274 | * block was required to hold the extra attributes. | |
3275 | * | |
3276 | * Make sure to clear the garbage left in the dn_spill | |
3277 | * field from the previous attributes in the bonus | |
3278 | * buffer. Otherwise, after writing out the spill | |
3279 | * block to the new allocated dva, it will free | |
3280 | * the old block pointed to by the invalid dn_spill. | |
3281 | */ | |
3282 | db->db_blkptr = NULL; | |
3283 | } | |
428870ff BB |
3284 | dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; |
3285 | mutex_exit(&dn->dn_mtx); | |
3286 | } | |
3287 | ||
34dc7c2f BB |
3288 | /* |
3289 | * If this is a bonus buffer, simply copy the bonus data into the | |
3290 | * dnode. It will be written out when the dnode is synced (and it | |
3291 | * will be synced, since it must have been dirty for dbuf_sync to | |
3292 | * be called). | |
3293 | */ | |
428870ff | 3294 | if (db->db_blkid == DMU_BONUS_BLKID) { |
34dc7c2f BB |
3295 | dbuf_dirty_record_t **drp; |
3296 | ||
3297 | ASSERT(*datap != NULL); | |
c99c9001 | 3298 | ASSERT0(db->db_level); |
50c957f7 NB |
3299 | ASSERT3U(dn->dn_phys->dn_bonuslen, <=, |
3300 | DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); | |
34dc7c2f | 3301 | bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); |
572e2857 BB |
3302 | DB_DNODE_EXIT(db); |
3303 | ||
34dc7c2f | 3304 | if (*datap != db->db.db_data) { |
50c957f7 NB |
3305 | int slots = DB_DNODE(db)->dn_num_slots; |
3306 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
a3fd9d9e | 3307 | kmem_free(*datap, bonuslen); |
25458cbe | 3308 | arc_space_return(bonuslen, ARC_SPACE_BONUS); |
34dc7c2f BB |
3309 | } |
3310 | db->db_data_pending = NULL; | |
3311 | drp = &db->db_last_dirty; | |
3312 | while (*drp != dr) | |
3313 | drp = &(*drp)->dr_next; | |
3314 | ASSERT(dr->dr_next == NULL); | |
428870ff | 3315 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f | 3316 | *drp = dr->dr_next; |
753972fc BB |
3317 | if (dr->dr_dbuf->db_level != 0) { |
3318 | mutex_destroy(&dr->dt.di.dr_mtx); | |
3319 | list_destroy(&dr->dt.di.dr_children); | |
3320 | } | |
34dc7c2f BB |
3321 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); |
3322 | ASSERT(db->db_dirtycnt > 0); | |
3323 | db->db_dirtycnt -= 1; | |
428870ff | 3324 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); |
34dc7c2f BB |
3325 | return; |
3326 | } | |
3327 | ||
572e2857 BB |
3328 | os = dn->dn_objset; |
3329 | ||
34dc7c2f BB |
3330 | /* |
3331 | * This function may have dropped the db_mtx lock allowing a dmu_sync | |
3332 | * operation to sneak in. As a result, we need to ensure that we | |
3333 | * don't check the dr_override_state until we have returned from | |
3334 | * dbuf_check_blkptr. | |
3335 | */ | |
3336 | dbuf_check_blkptr(dn, db); | |
3337 | ||
3338 | /* | |
572e2857 | 3339 | * If this buffer is in the middle of an immediate write, |
34dc7c2f BB |
3340 | * wait for the synchronous IO to complete. |
3341 | */ | |
3342 | while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { | |
3343 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); | |
3344 | cv_wait(&db->db_changed, &db->db_mtx); | |
3345 | ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); | |
3346 | } | |
3347 | ||
9babb374 BB |
3348 | if (db->db_state != DB_NOFILL && |
3349 | dn->dn_object != DMU_META_DNODE_OBJECT && | |
3350 | refcount_count(&db->db_holds) > 1 && | |
428870ff | 3351 | dr->dt.dl.dr_override_state != DR_OVERRIDDEN && |
9babb374 BB |
3352 | *datap == db->db_buf) { |
3353 | /* | |
3354 | * If this buffer is currently "in use" (i.e., there | |
3355 | * are active holds and db_data still references it), | |
3356 | * then make a copy before we start the write so that | |
3357 | * any modifications from the open txg will not leak | |
3358 | * into this write. | |
3359 | * | |
3360 | * NOTE: this copy does not need to be made for | |
3361 | * objects only modified in the syncing context (e.g. | |
3362 | * DMU_META_DNODE blocks). | |
3363 | */ | |
2aa34383 | 3364 | int psize = arc_buf_size(*datap); |
9babb374 | 3365 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); |
2aa34383 DK |
3366 | enum zio_compress compress_type = arc_get_compression(*datap); |
3367 | ||
3368 | if (compress_type == ZIO_COMPRESS_OFF) { | |
3369 | *datap = arc_alloc_buf(os->os_spa, db, type, psize); | |
3370 | } else { | |
3371 | int lsize = arc_buf_lsize(*datap); | |
3372 | ASSERT3U(type, ==, ARC_BUFC_DATA); | |
3373 | *datap = arc_alloc_compressed_buf(os->os_spa, db, | |
3374 | psize, lsize, compress_type); | |
3375 | } | |
3376 | bcopy(db->db.db_data, (*datap)->b_data, psize); | |
b128c09f | 3377 | } |
34dc7c2f BB |
3378 | db->db_data_pending = dr; |
3379 | ||
3380 | mutex_exit(&db->db_mtx); | |
3381 | ||
b128c09f | 3382 | dbuf_write(dr, *datap, tx); |
34dc7c2f BB |
3383 | |
3384 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
572e2857 | 3385 | if (dn->dn_object == DMU_META_DNODE_OBJECT) { |
34dc7c2f | 3386 | list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); |
572e2857 BB |
3387 | DB_DNODE_EXIT(db); |
3388 | } else { | |
3389 | /* | |
3390 | * Although zio_nowait() does not "wait for an IO", it does | |
3391 | * initiate the IO. If this is an empty write it seems plausible | |
3392 | * that the IO could actually be completed before the nowait | |
3393 | * returns. We need to DB_DNODE_EXIT() first in case | |
3394 | * zio_nowait() invalidates the dbuf. | |
3395 | */ | |
3396 | DB_DNODE_EXIT(db); | |
34dc7c2f | 3397 | zio_nowait(dr->dr_zio); |
572e2857 | 3398 | } |
34dc7c2f BB |
3399 | } |
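The copy taken above when the buffer is still in use is what pins the syncing image: without it, an open-txg modification made after the write was issued could leak into the block on disk. A toy stand-alone illustration of that snapshot-then-modify ordering (not ZFS code):

```c
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char live[8] = "txg=10";	/* buffer still held by writers */
	char sync_copy[8];

	memcpy(sync_copy, live, sizeof (live));	/* snapshot for the write */
	strcpy(live, "txg=11");			/* later open-txg change */
	printf("writing \"%s\"; live buffer is now \"%s\"\n",
	    sync_copy, live);
	return (0);
}
```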
3400 | ||
3401 | void | |
4bda3bd0 | 3402 | dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) |
34dc7c2f BB |
3403 | { |
3404 | dbuf_dirty_record_t *dr; | |
3405 | ||
c65aa5b2 | 3406 | while ((dr = list_head(list))) { |
34dc7c2f BB |
3407 | if (dr->dr_zio != NULL) { |
3408 | /* | |
3409 | * If we find an already initialized zio then we | |
3410 | * are processing the meta-dnode, and we have finished. | |
3411 | * The dbufs for all dnodes are put back on the list | |
3412 | * during processing, so that we can zio_wait() | |
3413 | * these IOs after initiating all child IOs. | |
3414 | */ | |
3415 | ASSERT3U(dr->dr_dbuf->db.db_object, ==, | |
3416 | DMU_META_DNODE_OBJECT); | |
3417 | break; | |
3418 | } | |
4bda3bd0 MA |
3419 | if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && |
3420 | dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { | |
3421 | VERIFY3U(dr->dr_dbuf->db_level, ==, level); | |
3422 | } | |
34dc7c2f BB |
3423 | list_remove(list, dr); |
3424 | if (dr->dr_dbuf->db_level > 0) | |
3425 | dbuf_sync_indirect(dr, tx); | |
3426 | else | |
3427 | dbuf_sync_leaf(dr, tx); | |
3428 | } | |
3429 | } | |
3430 | ||
34dc7c2f BB |
3431 | /* ARGSUSED */ |
3432 | static void | |
3433 | dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3434 | { | |
3435 | dmu_buf_impl_t *db = vdb; | |
572e2857 | 3436 | dnode_t *dn; |
b128c09f | 3437 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 3438 | blkptr_t *bp_orig = &zio->io_bp_orig; |
428870ff BB |
3439 | spa_t *spa = zio->io_spa; |
3440 | int64_t delta; | |
34dc7c2f | 3441 | uint64_t fill = 0; |
428870ff | 3442 | int i; |
34dc7c2f | 3443 | |
463a8cfe AR |
3444 | ASSERT3P(db->db_blkptr, !=, NULL); |
3445 | ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); | |
b128c09f | 3446 | |
572e2857 BB |
3447 | DB_DNODE_ENTER(db); |
3448 | dn = DB_DNODE(db); | |
428870ff BB |
3449 | delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); |
3450 | dnode_diduse_space(dn, delta - zio->io_prev_space_delta); | |
3451 | zio->io_prev_space_delta = delta; | |
34dc7c2f | 3452 | |
b0bc7a84 MG |
3453 | if (bp->blk_birth != 0) { |
3454 | ASSERT((db->db_blkid != DMU_SPILL_BLKID && | |
3455 | BP_GET_TYPE(bp) == dn->dn_type) || | |
3456 | (db->db_blkid == DMU_SPILL_BLKID && | |
9b67f605 MA |
3457 | BP_GET_TYPE(bp) == dn->dn_bonustype) || |
3458 | BP_IS_EMBEDDED(bp)); | |
b0bc7a84 | 3459 | ASSERT(BP_GET_LEVEL(bp) == db->db_level); |
34dc7c2f BB |
3460 | } |
3461 | ||
3462 | mutex_enter(&db->db_mtx); | |
3463 | ||
428870ff BB |
3464 | #ifdef ZFS_DEBUG |
3465 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
428870ff | 3466 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
463a8cfe | 3467 | ASSERT(!(BP_IS_HOLE(bp)) && |
50c957f7 | 3468 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
428870ff BB |
3469 | } |
3470 | #endif | |
3471 | ||
34dc7c2f BB |
3472 | if (db->db_level == 0) { |
3473 | mutex_enter(&dn->dn_mtx); | |
428870ff BB |
3474 | if (db->db_blkid > dn->dn_phys->dn_maxblkid && |
3475 | db->db_blkid != DMU_SPILL_BLKID) | |
34dc7c2f BB |
3476 | dn->dn_phys->dn_maxblkid = db->db_blkid; |
3477 | mutex_exit(&dn->dn_mtx); | |
3478 | ||
3479 | if (dn->dn_type == DMU_OT_DNODE) { | |
50c957f7 NB |
3480 | i = 0; |
3481 | while (i < db->db.db_size) { | |
3482 | dnode_phys_t *dnp = db->db.db_data + i; | |
3483 | ||
3484 | i += DNODE_MIN_SIZE; | |
3485 | if (dnp->dn_type != DMU_OT_NONE) { | |
34dc7c2f | 3486 | fill++; |
50c957f7 NB |
3487 | i += dnp->dn_extra_slots * |
3488 | DNODE_MIN_SIZE; | |
3489 | } | |
34dc7c2f BB |
3490 | } |
3491 | } else { | |
b0bc7a84 MG |
3492 | if (BP_IS_HOLE(bp)) { |
3493 | fill = 0; | |
3494 | } else { | |
3495 | fill = 1; | |
3496 | } | |
34dc7c2f BB |
3497 | } |
3498 | } else { | |
b128c09f | 3499 | blkptr_t *ibp = db->db.db_data; |
34dc7c2f | 3500 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
b128c09f BB |
3501 | for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { |
3502 | if (BP_IS_HOLE(ibp)) | |
34dc7c2f | 3503 | continue; |
9b67f605 | 3504 | fill += BP_GET_FILL(ibp); |
34dc7c2f BB |
3505 | } |
3506 | } | |
572e2857 | 3507 | DB_DNODE_EXIT(db); |
34dc7c2f | 3508 | |
9b67f605 MA |
3509 | if (!BP_IS_EMBEDDED(bp)) |
3510 | bp->blk_fill = fill; | |
34dc7c2f BB |
3511 | |
3512 | mutex_exit(&db->db_mtx); | |
463a8cfe AR |
3513 | |
3514 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
3515 | *db->db_blkptr = *bp; | |
3516 | rw_exit(&dn->dn_struct_rwlock); | |
34dc7c2f BB |
3517 | } |
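When the level-0 block holds dnodes, the fill loop above walks it in DNODE_MIN_SIZE slots and, for every allocated dnode, also skips the extra slots a large dnode consumes. A stand-alone sketch of that walk; fake_dnp and the slot map are invented for illustration:

```c
#include <stdio.h>

#define DNODE_MIN_SIZE	512
#define NSLOTS		8

struct fake_dnp {
	int dn_type;		/* 0 stands in for DMU_OT_NONE */
	int dn_extra_slots;	/* extra slots beyond the first */
};

int
main(void)
{
	/* a 2-slot dnode, a free slot, then three 1-slot dnodes */
	struct fake_dnp slots[NSLOTS] = {
		{ 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 0 },
		{ 1, 0 }, { 0, 0 }, { 1, 0 }, { 0, 0 },
	};
	int db_size = NSLOTS * DNODE_MIN_SIZE;
	int fill = 0;

	for (int i = 0; i < db_size; ) {
		struct fake_dnp *dnp = &slots[i / DNODE_MIN_SIZE];

		i += DNODE_MIN_SIZE;
		if (dnp->dn_type != 0) {
			fill++;
			i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
		}
	}
	printf("fill = %d allocated dnodes\n", fill);	/* fill = 4 */
	return (0);
}
```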
3518 | ||
bc77ba73 PD |
3519 | /* ARGSUSED */ |
3520 | /* | |
3521 | * This function gets called just prior to running through the compression | |
3522 | * stage of the zio pipeline. If we're an indirect block comprised of only | |
3523 | * holes, then we want this indirect to be compressed away to a hole. In | |
3524 | * order to do that we must zero out any information about the holes that | |
3525 | * this indirect points to before we try to compress it. | |
3526 | */ | |
3527 | static void | |
3528 | dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3529 | { | |
3530 | dmu_buf_impl_t *db = vdb; | |
3531 | dnode_t *dn; | |
3532 | blkptr_t *bp; | |
3533 | uint64_t i; | |
3534 | int epbs; | |
3535 | ||
3536 | ASSERT3U(db->db_level, >, 0); | |
3537 | DB_DNODE_ENTER(db); | |
3538 | dn = DB_DNODE(db); | |
3539 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3540 | ||
3541 | /* Determine if all our children are holes */ | |
3f93077b | 3542 | for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { |
bc77ba73 PD |
3543 | if (!BP_IS_HOLE(bp)) |
3544 | break; | |
3545 | } | |
3546 | ||
3547 | /* | |
3548 | * If all the children are holes, then zero them all out so that | |
3549 | * we may get compressed away. | |
3550 | */ | |
3f93077b | 3551 | if (i == 1ULL << epbs) { |
bc77ba73 PD |
3552 | /* didn't find any non-holes */ |
3553 | bzero(db->db.db_data, db->db.db_size); | |
3554 | } | |
3555 | DB_DNODE_EXIT(db); | |
3556 | } | |
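A stand-alone stand-in for the scan above: all-zero entries play the role of BP_IS_HOLE(), and if no non-hole is found the whole buffer is wiped so the compression stage can collapse it to a hole:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NPTRS	8

int
main(void)
{
	uint64_t bps[NPTRS] = { 0 };	/* every "pointer" is a hole */
	int i;

	for (i = 0; i < NPTRS; i++) {
		if (bps[i] != 0)	/* stand-in for !BP_IS_HOLE(bp) */
			break;
	}
	if (i == NPTRS) {
		/* didn't find any non-holes */
		memset(bps, 0, sizeof (bps));
		printf("all holes: buffer zeroed, compresses away\n");
	}
	return (0);
}
```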
3557 | ||
e8b96c60 MA |
3558 | /* |
3559 | * The SPA will call this callback several times for each zio - once | |
3560 | * for every physical child i/o (zio->io_phys_children times). This | |
3561 | * allows the DMU to monitor the progress of each logical i/o. For example, | |
3562 | * there may be 2 copies of an indirect block, or many fragments of a RAID-Z | |
3563 | * block. There may be a long delay before all copies/fragments are completed, | |
3564 | * so this callback allows us to retire dirty space gradually, as the physical | |
3565 | * i/os complete. | |
3566 | */ | |
3567 | /* ARGSUSED */ | |
3568 | static void | |
3569 | dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) | |
3570 | { | |
3571 | dmu_buf_impl_t *db = arg; | |
3572 | objset_t *os = db->db_objset; | |
3573 | dsl_pool_t *dp = dmu_objset_pool(os); | |
3574 | dbuf_dirty_record_t *dr; | |
3575 | int delta = 0; | |
3576 | ||
3577 | dr = db->db_data_pending; | |
3578 | ASSERT3U(dr->dr_txg, ==, zio->io_txg); | |
3579 | ||
3580 | /* | |
3581 | * The callback will be called io_phys_children times. Retire one | |
3582 | * portion of our dirty space each time we are called. Any rounding | |
3583 | * error will be cleaned up by dsl_pool_sync()'s call to | |
3584 | * dsl_pool_undirty_space(). | |
3585 | */ | |
3586 | delta = dr->dr_accounted / zio->io_phys_children; | |
3587 | dsl_pool_undirty_space(dp, delta, zio->io_txg); | |
3588 | } | |
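Because dr_accounted is split by integer division, each of the io_phys_children callbacks retires an equal share and a few bytes of rounding error can be left for dsl_pool_sync(), exactly as the comment warns. A stand-alone illustration with assumed numbers:

```c
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dr_accounted = 100000;	/* dirty bytes for this record */
	int io_phys_children = 3;	/* e.g. three mirror copies */
	uint64_t undirtied = 0;

	for (int i = 0; i < io_phys_children; i++) {
		/* one physdone callback per physical child i/o */
		uint64_t delta = dr_accounted / io_phys_children;
		undirtied += delta;	/* dsl_pool_undirty_space() */
	}
	printf("retired %llu of %llu; %llu left for dsl_pool_sync()\n",
	    (unsigned long long)undirtied,
	    (unsigned long long)dr_accounted,
	    (unsigned long long)(dr_accounted - undirtied));
	return (0);
}
```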
3589 | ||
34dc7c2f BB |
3590 | /* ARGSUSED */ |
3591 | static void | |
3592 | dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3593 | { | |
3594 | dmu_buf_impl_t *db = vdb; | |
428870ff | 3595 | blkptr_t *bp_orig = &zio->io_bp_orig; |
b0bc7a84 MG |
3596 | blkptr_t *bp = db->db_blkptr; |
3597 | objset_t *os = db->db_objset; | |
3598 | dmu_tx_t *tx = os->os_synctx; | |
34dc7c2f BB |
3599 | dbuf_dirty_record_t **drp, *dr; |
3600 | ||
c99c9001 | 3601 | ASSERT0(zio->io_error); |
428870ff BB |
3602 | ASSERT(db->db_blkptr == bp); |
3603 | ||
03c6040b GW |
3604 | /* |
3605 | * For nopwrites and rewrites we ensure that the bp matches our | |
3606 | * original and bypass all the accounting. | |
3607 | */ | |
3608 | if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { | |
428870ff BB |
3609 | ASSERT(BP_EQUAL(bp, bp_orig)); |
3610 | } else { | |
b0bc7a84 | 3611 | dsl_dataset_t *ds = os->os_dsl_dataset; |
428870ff BB |
3612 | (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); |
3613 | dsl_dataset_block_born(ds, bp, tx); | |
3614 | } | |
34dc7c2f BB |
3615 | |
3616 | mutex_enter(&db->db_mtx); | |
3617 | ||
428870ff BB |
3618 | DBUF_VERIFY(db); |
3619 | ||
34dc7c2f BB |
3620 | drp = &db->db_last_dirty; |
3621 | while ((dr = *drp) != db->db_data_pending) | |
3622 | drp = &dr->dr_next; | |
3623 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
428870ff | 3624 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f BB |
3625 | ASSERT(dr->dr_next == NULL); |
3626 | *drp = dr->dr_next; | |
3627 | ||
428870ff BB |
3628 | #ifdef ZFS_DEBUG |
3629 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
572e2857 BB |
3630 | dnode_t *dn; |
3631 | ||
3632 | DB_DNODE_ENTER(db); | |
3633 | dn = DB_DNODE(db); | |
428870ff BB |
3634 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
3635 | ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && | |
50c957f7 | 3636 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
572e2857 | 3637 | DB_DNODE_EXIT(db); |
428870ff BB |
3638 | } |
3639 | #endif | |
3640 | ||
34dc7c2f | 3641 | if (db->db_level == 0) { |
428870ff | 3642 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 3643 | ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); |
b128c09f BB |
3644 | if (db->db_state != DB_NOFILL) { |
3645 | if (dr->dt.dl.dr_data != db->db_buf) | |
d3c2ae1c | 3646 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
b128c09f | 3647 | } |
34dc7c2f | 3648 | } else { |
572e2857 BB |
3649 | dnode_t *dn; |
3650 | ||
3651 | DB_DNODE_ENTER(db); | |
3652 | dn = DB_DNODE(db); | |
34dc7c2f | 3653 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
b0bc7a84 | 3654 | ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); |
34dc7c2f | 3655 | if (!BP_IS_HOLE(db->db_blkptr)) { |
1fde1e37 BB |
3656 | ASSERTV(int epbs = dn->dn_phys->dn_indblkshift - |
3657 | SPA_BLKPTRSHIFT); | |
b0bc7a84 MG |
3658 | ASSERT3U(db->db_blkid, <=, |
3659 | dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); | |
34dc7c2f BB |
3660 | ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, |
3661 | db->db.db_size); | |
34dc7c2f | 3662 | } |
572e2857 | 3663 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
3664 | mutex_destroy(&dr->dt.di.dr_mtx); |
3665 | list_destroy(&dr->dt.di.dr_children); | |
3666 | } | |
3667 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
3668 | ||
3669 | cv_broadcast(&db->db_changed); | |
3670 | ASSERT(db->db_dirtycnt > 0); | |
3671 | db->db_dirtycnt -= 1; | |
3672 | db->db_data_pending = NULL; | |
b0bc7a84 | 3673 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); |
428870ff BB |
3674 | } |
3675 | ||
3676 | static void | |
3677 | dbuf_write_nofill_ready(zio_t *zio) | |
3678 | { | |
3679 | dbuf_write_ready(zio, NULL, zio->io_private); | |
3680 | } | |
3681 | ||
3682 | static void | |
3683 | dbuf_write_nofill_done(zio_t *zio) | |
3684 | { | |
3685 | dbuf_write_done(zio, NULL, zio->io_private); | |
3686 | } | |
3687 | ||
3688 | static void | |
3689 | dbuf_write_override_ready(zio_t *zio) | |
3690 | { | |
3691 | dbuf_dirty_record_t *dr = zio->io_private; | |
3692 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3693 | ||
3694 | dbuf_write_ready(zio, NULL, db); | |
3695 | } | |
3696 | ||
3697 | static void | |
3698 | dbuf_write_override_done(zio_t *zio) | |
3699 | { | |
3700 | dbuf_dirty_record_t *dr = zio->io_private; | |
3701 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3702 | blkptr_t *obp = &dr->dt.dl.dr_overridden_by; | |
3703 | ||
3704 | mutex_enter(&db->db_mtx); | |
3705 | if (!BP_EQUAL(zio->io_bp, obp)) { | |
3706 | if (!BP_IS_HOLE(obp)) | |
3707 | dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); | |
3708 | arc_release(dr->dt.dl.dr_data, db); | |
3709 | } | |
34dc7c2f BB |
3710 | mutex_exit(&db->db_mtx); |
3711 | ||
428870ff | 3712 | dbuf_write_done(zio, NULL, db); |
a6255b7f DQ |
3713 | |
3714 | if (zio->io_abd != NULL) | |
3715 | abd_put(zio->io_abd); | |
428870ff BB |
3716 | } |
3717 | ||
e49f1e20 | 3718 | /* Issue I/O to commit a dirty buffer to disk. */ |
428870ff BB |
3719 | static void |
3720 | dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) | |
3721 | { | |
3722 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
572e2857 BB |
3723 | dnode_t *dn; |
3724 | objset_t *os; | |
428870ff BB |
3725 | dmu_buf_impl_t *parent = db->db_parent; |
3726 | uint64_t txg = tx->tx_txg; | |
5dbd68a3 | 3727 | zbookmark_phys_t zb; |
428870ff BB |
3728 | zio_prop_t zp; |
3729 | zio_t *zio; | |
3730 | int wp_flag = 0; | |
34dc7c2f | 3731 | |
463a8cfe AR |
3732 | ASSERT(dmu_tx_is_syncing(tx)); |
3733 | ||
572e2857 BB |
3734 | DB_DNODE_ENTER(db); |
3735 | dn = DB_DNODE(db); | |
3736 | os = dn->dn_objset; | |
3737 | ||
428870ff BB |
3738 | if (db->db_state != DB_NOFILL) { |
3739 | if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { | |
3740 | /* | |
3741 | * Private object buffers are released here rather | |
3742 | * than in dbuf_dirty() since they are only modified | |
3743 | * in the syncing context and we don't want the | |
3744 | * overhead of making multiple copies of the data. | |
3745 | */ | |
3746 | if (BP_IS_HOLE(db->db_blkptr)) { | |
3747 | arc_buf_thaw(data); | |
3748 | } else { | |
3749 | dbuf_release_bp(db); | |
3750 | } | |
3751 | } | |
3752 | } | |
3753 | ||
3754 | if (parent != dn->dn_dbuf) { | |
e49f1e20 WA |
3755 | /* Our parent is an indirect block. */ |
3756 | /* We have a dirty parent that has been scheduled for write. */ | |
428870ff | 3757 | ASSERT(parent && parent->db_data_pending); |
e49f1e20 | 3758 | /* Our parent's buffer is one level closer to the dnode. */ |
428870ff | 3759 | ASSERT(db->db_level == parent->db_level-1); |
e49f1e20 WA |
3760 | /* |
3761 | * We're about to modify our parent's db_data by modifying | |
3762 | * our block pointer, so the parent must be released. | |
3763 | */ | |
428870ff BB |
3764 | ASSERT(arc_released(parent->db_buf)); |
3765 | zio = parent->db_data_pending->dr_zio; | |
3766 | } else { | |
e49f1e20 | 3767 | /* Our parent is the dnode itself. */ |
428870ff BB |
3768 | ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && |
3769 | db->db_blkid != DMU_SPILL_BLKID) || | |
3770 | (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); | |
3771 | if (db->db_blkid != DMU_SPILL_BLKID) | |
3772 | ASSERT3P(db->db_blkptr, ==, | |
3773 | &dn->dn_phys->dn_blkptr[db->db_blkid]); | |
3774 | zio = dn->dn_zio; | |
3775 | } | |
3776 | ||
3777 | ASSERT(db->db_level == 0 || data == db->db_buf); | |
3778 | ASSERT3U(db->db_blkptr->blk_birth, <=, txg); | |
3779 | ASSERT(zio); | |
3780 | ||
3781 | SET_BOOKMARK(&zb, os->os_dsl_dataset ? | |
3782 | os->os_dsl_dataset->ds_object : DMU_META_OBJSET, | |
3783 | db->db.db_object, db->db_level, db->db_blkid); | |
3784 | ||
3785 | if (db->db_blkid == DMU_SPILL_BLKID) | |
3786 | wp_flag = WP_SPILL; | |
3787 | wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; | |
3788 | ||
2aa34383 DK |
3789 | dmu_write_policy(os, dn, db->db_level, wp_flag, |
3790 | (data != NULL && arc_get_compression(data) != ZIO_COMPRESS_OFF) ? | |
3791 | arc_get_compression(data) : ZIO_COMPRESS_INHERIT, &zp); | |
572e2857 | 3792 | DB_DNODE_EXIT(db); |
428870ff | 3793 | |
463a8cfe AR |
3794 | /* |
3795 | * We copy the blkptr now (rather than when we instantiate the dirty | |
3796 | * record), because its value can change between open context and | |
3797 | * syncing context. We do not need to hold dn_struct_rwlock to read | |
3798 | * db_blkptr because we are in syncing context. | |
3799 | */ | |
3800 | dr->dr_bp_copy = *db->db_blkptr; | |
3801 | ||
9b67f605 MA |
3802 | if (db->db_level == 0 && |
3803 | dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { | |
3804 | /* | |
3805 | * The BP for this block has been provided by open context | |
3806 | * (by dmu_sync() or dmu_buf_write_embedded()). | |
3807 | */ | |
a6255b7f DQ |
3808 | abd_t *contents = (data != NULL) ? |
3809 | abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; | |
9b67f605 | 3810 | |
428870ff | 3811 | dr->dr_zio = zio_write(zio, os->os_spa, txg, |
2aa34383 DK |
3812 | &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size, |
3813 | &zp, dbuf_write_override_ready, NULL, NULL, | |
bc77ba73 | 3814 | dbuf_write_override_done, |
e8b96c60 | 3815 | dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); |
428870ff BB |
3816 | mutex_enter(&db->db_mtx); |
3817 | dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; | |
3818 | zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, | |
03c6040b | 3819 | dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); |
428870ff BB |
3820 | mutex_exit(&db->db_mtx); |
3821 | } else if (db->db_state == DB_NOFILL) { | |
3c67d83a TH |
3822 | ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || |
3823 | zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); | |
428870ff | 3824 | dr->dr_zio = zio_write(zio, os->os_spa, txg, |
2aa34383 | 3825 | &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, |
bc77ba73 PD |
3826 | dbuf_write_nofill_ready, NULL, NULL, |
3827 | dbuf_write_nofill_done, db, | |
428870ff BB |
3828 | ZIO_PRIORITY_ASYNC_WRITE, |
3829 | ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); | |
3830 | } else { | |
bc77ba73 | 3831 | arc_done_func_t *children_ready_cb = NULL; |
428870ff | 3832 | ASSERT(arc_released(data)); |
bc77ba73 PD |
3833 | |
3834 | /* | |
3835 | * For indirect blocks, we want to setup the children | |
3836 | * ready callback so that we can properly handle an indirect | |
3837 | * block that only contains holes. | |
3838 | */ | |
3839 | if (db->db_level != 0) | |
3840 | children_ready_cb = dbuf_write_children_ready; | |
3841 | ||
428870ff | 3842 | dr->dr_zio = arc_write(zio, os->os_spa, txg, |
463a8cfe | 3843 | &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), |
d3c2ae1c GW |
3844 | &zp, dbuf_write_ready, |
3845 | children_ready_cb, dbuf_write_physdone, | |
3846 | dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, | |
3847 | ZIO_FLAG_MUSTSUCCEED, &zb); | |
428870ff | 3848 | } |
34dc7c2f | 3849 | } |
c28b2279 BB |
3850 | |
3851 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
8f576c23 BB |
3852 | EXPORT_SYMBOL(dbuf_find); |
3853 | EXPORT_SYMBOL(dbuf_is_metadata); | |
d3c2ae1c | 3854 | EXPORT_SYMBOL(dbuf_destroy); |
8f576c23 BB |
3855 | EXPORT_SYMBOL(dbuf_loan_arcbuf); |
3856 | EXPORT_SYMBOL(dbuf_whichblock); | |
3857 | EXPORT_SYMBOL(dbuf_read); | |
3858 | EXPORT_SYMBOL(dbuf_unoverride); | |
3859 | EXPORT_SYMBOL(dbuf_free_range); | |
3860 | EXPORT_SYMBOL(dbuf_new_size); | |
3861 | EXPORT_SYMBOL(dbuf_release_bp); | |
3862 | EXPORT_SYMBOL(dbuf_dirty); | |
c28b2279 | 3863 | EXPORT_SYMBOL(dmu_buf_will_dirty); |
8f576c23 BB |
3864 | EXPORT_SYMBOL(dmu_buf_will_not_fill); |
3865 | EXPORT_SYMBOL(dmu_buf_will_fill); | |
3866 | EXPORT_SYMBOL(dmu_buf_fill_done); | |
4047414a | 3867 | EXPORT_SYMBOL(dmu_buf_rele); |
8f576c23 | 3868 | EXPORT_SYMBOL(dbuf_assign_arcbuf); |
8f576c23 BB |
3869 | EXPORT_SYMBOL(dbuf_prefetch); |
3870 | EXPORT_SYMBOL(dbuf_hold_impl); | |
3871 | EXPORT_SYMBOL(dbuf_hold); | |
3872 | EXPORT_SYMBOL(dbuf_hold_level); | |
3873 | EXPORT_SYMBOL(dbuf_create_bonus); | |
3874 | EXPORT_SYMBOL(dbuf_spill_set_blksz); | |
3875 | EXPORT_SYMBOL(dbuf_rm_spill); | |
3876 | EXPORT_SYMBOL(dbuf_add_ref); | |
3877 | EXPORT_SYMBOL(dbuf_rele); | |
3878 | EXPORT_SYMBOL(dbuf_rele_and_unlock); | |
3879 | EXPORT_SYMBOL(dbuf_refcount); | |
3880 | EXPORT_SYMBOL(dbuf_sync_list); | |
3881 | EXPORT_SYMBOL(dmu_buf_set_user); | |
3882 | EXPORT_SYMBOL(dmu_buf_set_user_ie); | |
8f576c23 BB |
3883 | EXPORT_SYMBOL(dmu_buf_get_user); |
3884 | EXPORT_SYMBOL(dmu_buf_freeable); | |
0f699108 | 3885 | EXPORT_SYMBOL(dmu_buf_get_blkptr); |
d3c2ae1c GW |
3886 | |
3887 | ||
3888 | module_param(dbuf_cache_max_bytes, ulong, 0644); | |
3889 | MODULE_PARM_DESC(dbuf_cache_max_bytes, | |
3890 | "Maximum size in bytes of the dbuf cache."); | |
3891 | ||
3892 | module_param(dbuf_cache_hiwater_pct, uint, 0644); | |
3893 | MODULE_PARM_DESC(dbuf_cache_hiwater_pct, | |
3894 | "Percentage over dbuf_cache_max_bytes when dbufs \ | |
3895 | must be evicted directly."); | |
3896 | ||
3897 | module_param(dbuf_cache_lowater_pct, uint, 0644); | |
3898 | MODULE_PARM_DESC(dbuf_cache_lowater_pct, | |
3899 | "Percentage below dbuf_cache_max_bytes \ | |
3900 | when the evict thread stops evicting dbufs."); | |
3901 | ||
3902 | module_param(dbuf_cache_max_shift, int, 0644); | |
3903 | MODULE_PARM_DESC(dbuf_cache_max_shift, | |
3904 | "Cap the size of the dbuf cache to log2 fraction of arc size."); | |
3905 | ||
c28b2279 | 3906 | #endif |