/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_dbuf.h>
#include <sys/callb.h>
#include <sys/abd.h>

kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",		KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",	KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",	KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",		KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",		KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",	KSTAT_DATA_UINT64 } },
	{ "hash_hits",			KSTAT_DATA_UINT64 },
	{ "hash_misses",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "hash_insert_race",		KSTAT_DATA_UINT64 }
};

#define	DBUF_STAT_INCR(stat, val)	\
	atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)	\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;	\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;	\
}
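
/*
 * A note on DBUF_STAT_MAX: it maintains a maximum without a lock by
 * re-reading the current value into _m and retrying the compare-and-swap
 * until either the stored maximum is already >= v or our CAS installs v.
 * Illustrative race (not traced from a real run): two threads racing with
 * v = 10 and v = 12 may both read _m = 8; whichever CAS loses re-reads
 * the winner's value and the slot converges to 12.
 */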

struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	boolean_t dh_fail_sparse;
	boolean_t dh_fail_uncached;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;
	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, boolean_t fail_sparse,
    boolean_t fail_uncached,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
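
/*
 * Typical use of the user-eviction interface (hypothetical client sketch;
 * my_evict_sync, my_evict_async and mystate are illustrative names, not
 * code from this file): a consumer embeds a dmu_buf_user_t in its own
 * state, initializes it once, and attaches it to a held dbuf:
 *
 *	dmu_buf_init_user(&mystate->mu_dbu, my_evict_sync,
 *	    my_evict_async, &mystate->mu_db);
 *	(void) dmu_buf_set_user(&db->db, &mystate->mu_dbu);
 *
 * See dbuf_evict_user() below for how the sync and async callbacks
 * are invoked when the dbuf is evicted.
 */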

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
unsigned long dbuf_cache_max_bytes = 0;

/* Set the default size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_shift = 5;
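
/*
 * For example (illustrative numbers): with the default shift of 5 the
 * dbuf cache targets 1/32nd of the ARC target size, so an 8 GiB ARC
 * target yields a 256 MiB dbuf cache (see dbuf_cache_target_bytes()).
 */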

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
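
/*
 * Worked example (illustrative numbers): with a 256 MiB target and the
 * default 10% margins, dbuf_cache_lowater_bytes() is 256 - 25.6 = ~230 MiB
 * and dbuf_cache_hiwater_bytes() is 256 + 25.6 = ~282 MiB, so the evict
 * thread runs until the cache drops to ~230 MiB, and callers start
 * evicting directly once it exceeds ~282 MiB.
 */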

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}
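
/*
 * Callers reduce the 64-bit hash to an index; e.g. the pattern used by
 * dbuf_find() and dbuf_hash_insert() below is:
 *
 *	idx = dbuf_hash(os, obj, level, blkid) & h->hash_table_mask;
 *
 * The mask works because dbuf_init() keeps the table size a power of
 * two; dbuf_cache_multilist_index_func() instead reduces the hash with
 * a modulo, since the number of multilist sublists need not be a power
 * of two.
 */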

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)	\
	((dbuf)->db.db_object == (obj) &&	\
	(dbuf)->db_objset == (os) &&	\
	(dbuf)->db_level == (level) &&	\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);
	DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);

	return (NULL);
}
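
/*
 * A note on the stats above: i counts the entries traversed before the
 * insertion point, so inserting into any nonempty bucket counts as a
 * collision, a bucket is first counted as a chain when its second
 * element arrives (i == 1), and hash_chain_max records the longest
 * bucket observed at insertion time.
 */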

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	return (refcount_count(&dbuf_cache_size) > dbuf_cache_hiwater_bytes());
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (refcount_count(&dbuf_cache_size) > dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		dbuf_destroy(db);
		DBUF_STAT_MAX(cache_size_bytes_max,
		    refcount_count(&dbuf_cache_size));
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_sig_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	} else {
		ds->cache_size_bytes.value.ui64 =
		    refcount_count(&dbuf_cache_size);
		ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
		ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
		ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
		ds->hash_elements.value.ui64 = dbuf_hash_count;
	}

	return (0);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;
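
	/*
	 * For example (illustrative numbers): on a machine with 16 GiB
	 * of physical memory, hsize grows until hsize * 8K >= 16 GiB,
	 * i.e. hsize = 2^21 buckets, giving a 16 MiB table of 8-byte
	 * pointers, consistent with the 1MB-per-GB estimate above.
	 */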

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * Setup the parameters for the dbuf cache. We set the size of the
	 * dbuf cache to 1/32nd (default) of the target size of the ARC. If
	 * the value has been specified as a module option and it's not
	 * greater than the target size of the ARC, then we honor that value.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_target_bytes()) {
		dbuf_cache_max_bytes = arc_target_bytes() >> dbuf_cache_shift;
	}

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_cache_link),
	    dbuf_cache_multilist_index_func);
	refcount_create(&dbuf_cache_size);

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);

		for (i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(dbuf_cache);

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
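
/*
 * Worked example (illustrative numbers): with 128K data blocks
 * (datablkshift = 17) and 16K indirect blocks (indblkshift = 14, and
 * SPA_BLKPTRSHIFT = 7, so 128 block pointers per indirect block), the
 * level 1 blkid for an offset is offset >> (17 + 1 * (14 - 7)) =
 * offset >> 24, i.e. each level 1 block covers 16 MiB of file data.
 */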

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		if (buf == NULL) {
			buf = arc_alloc_buf(db->db_objset->os_spa,
			    db, DBUF_GET_BUFC_TYPE(db), db->db.db_size);
		}
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (buf != NULL) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		/*
		 * The bonus length stored in the dnode may be less than
		 * the maximum available space in the bonus buffer.
		 */
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		arc_buf_t *dn_buf = (dn->dn_dbuf != NULL) ?
		    dn->dn_dbuf->db_buf : NULL;

		/* if the underlying dnode block is encrypted, decrypt it */
		if (dn_buf != NULL && dn->dn_objset->os_encrypted &&
		    DMU_OT_IS_ENCRYPTED(dn->dn_bonustype) &&
		    (flags & DB_RF_NO_DECRYPT) == 0 &&
		    arc_is_encrypted(dn_buf)) {
			err = arc_untransform(dn_buf, dn->dn_objset->os_spa,
			    dmu_objset_id(dn->dn_objset), B_TRUE);
			if (err != 0) {
				DB_DNODE_EXIT(db);
				mutex_exit(&db->db_mtx);
				return (err);
			}
		}

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
		arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
		if (bonuslen < max_bonuslen)
			bzero(db->db.db_data, max_bonuslen);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return (0);
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		return (SET_ERROR(EIO));
	}

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;

	err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);

	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		/*
		 * If the arc buf is compressed or encrypted, we need to
		 * untransform it to read the data. This could happen during
		 * the "zfs receive" of a stream which is deduplicated and
		 * either raw or compressed. We do not need to do this if the
		 * caller wants raw encrypted data.
		 */
		if (db->db_buf != NULL && (flags & DB_RF_NO_DECRYPT) == 0 &&
		    (arc_is_encrypted(db->db_buf) ||
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
			dbuf_fix_old_data(db, spa_syncing_txg(spa));
			err = arc_untransform(db->db_buf, spa,
			    dmu_objset_id(db->db_objset), B_FALSE);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_hits);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		err = dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (!err && prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_misses);

		if (!err && need_wait)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_misses);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	/*
	 * This assert is valid because dmu_sync() expects to be called by
	 * a zilog's get_data while holding a range lock. This call only
	 * comes from dbuf_dirty() callers who must also hold a range lock.
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;
	dr->dt.dl.dr_raw = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
1550 | * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 | |
1551 | * data blocks in the free range, so that any future readers will find | |
1552 | * empty blocks. | |
1553 | */ | |
1554 | void | |
1555 | dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, | |
1556 | dmu_tx_t *tx) | |
1557 | { | |
1558 | dmu_buf_impl_t *db_search; | |
1559 | dmu_buf_impl_t *db, *db_next; | |
1560 | uint64_t txg = tx->tx_txg; | |
1561 | avl_index_t where; | |
1562 | ||
1563 | if (end_blkid > dn->dn_maxblkid && | |
1564 | !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) | |
1565 | end_blkid = dn->dn_maxblkid; | |
1566 | dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); | |
1567 | ||
1568 | db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); | |
1569 | db_search->db_level = 0; | |
1570 | db_search->db_blkid = start_blkid; | |
1571 | db_search->db_state = DB_SEARCH; | |
1572 | ||
1573 | mutex_enter(&dn->dn_dbufs_mtx); | |
1574 | db = avl_find(&dn->dn_dbufs, db_search, &where); | |
1575 | ASSERT3P(db, ==, NULL); | |
1576 | ||
1577 | db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); | |
1578 | ||
1579 | for (; db != NULL; db = db_next) { | |
1580 | db_next = AVL_NEXT(&dn->dn_dbufs, db); | |
1581 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
1582 | ||
1583 | if (db->db_level != 0 || db->db_blkid > end_blkid) { | |
1584 | break; | |
1585 | } | |
1586 | ASSERT3U(db->db_blkid, >=, start_blkid); | |
1587 | ||
1588 | /* found a level 0 buffer in the range */ | |
1589 | mutex_enter(&db->db_mtx); | |
1590 | if (dbuf_undirty(db, tx)) { | |
1591 | /* mutex has been dropped and dbuf destroyed */ | |
1592 | continue; | |
1593 | } | |
1594 | ||
1595 | if (db->db_state == DB_UNCACHED || | |
1596 | db->db_state == DB_NOFILL || | |
1597 | db->db_state == DB_EVICTING) { | |
1598 | ASSERT(db->db.db_data == NULL); | |
1599 | mutex_exit(&db->db_mtx); | |
1600 | continue; | |
1601 | } | |
1602 | if (db->db_state == DB_READ || db->db_state == DB_FILL) { | |
1603 | /* will be handled in dbuf_read_done or dbuf_rele */ | |
1604 | db->db_freed_in_flight = TRUE; | |
1605 | mutex_exit(&db->db_mtx); | |
1606 | continue; | |
1607 | } | |
1608 | if (refcount_count(&db->db_holds) == 0) { | |
1609 | ASSERT(db->db_buf); | |
1610 | dbuf_destroy(db); | |
1611 | continue; | |
1612 | } | |
1613 | /* The dbuf is referenced */ | |
1614 | ||
1615 | if (db->db_last_dirty != NULL) { | |
1616 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
1617 | ||
1618 | if (dr->dr_txg == txg) { | |
1619 | /* | |
1620 | * This buffer is "in-use", re-adjust the file | |
1621 | * size to reflect that this buffer may | |
1622 | * contain new data when we sync. | |
1623 | */ | |
1624 | if (db->db_blkid != DMU_SPILL_BLKID && | |
1625 | db->db_blkid > dn->dn_maxblkid) | |
1626 | dn->dn_maxblkid = db->db_blkid; | |
1627 | dbuf_unoverride(dr); | |
1628 | } else { | |
1629 | /* | |
1630 | * This dbuf is not dirty in the open context. | |
1631 | * Either uncache it (if it's not referenced in | |
1632 | * the open context) or reset its contents to | |
1633 | * empty. | |
1634 | */ | |
1635 | dbuf_fix_old_data(db, txg); | |
1636 | } | |
1637 | } | |
1638 | /* clear the contents if it's cached */ | |
1639 | if (db->db_state == DB_CACHED) { | |
1640 | ASSERT(db->db.db_data != NULL); | |
1641 | arc_release(db->db_buf, db); | |
1642 | bzero(db->db.db_data, db->db.db_size); | |
1643 | arc_buf_freeze(db->db_buf); | |
1644 | } | |
1645 | ||
1646 | mutex_exit(&db->db_mtx); | |
1647 | } | |
1648 | ||
1649 | kmem_free(db_search, sizeof (dmu_buf_impl_t)); | |
1650 | mutex_exit(&dn->dn_dbufs_mtx); | |
1651 | } | |
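
/*
 * A minimal sketch of the calling pattern for dbuf_free_range().  In the
 * real code it is driven from the dnode layer (e.g. dnode_free_range());
 * the wrapper below is hypothetical and for illustration only.  The
 * caller must pass an assigned transaction, since referenced dirty dbufs
 * in the range are fixed up against tx->tx_txg.
 */
static void
example_free_blocks(dnode_t *dn, uint64_t first, uint64_t last, dmu_tx_t *tx)
{
	/* Evict (if unreferenced) or zero (if referenced) cached L0 dbufs. */
	dbuf_free_range(dn, first, last, tx);
}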
1652 | ||
1653 | void | |
1654 | dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) | |
1655 | { | |
1656 | arc_buf_t *buf, *obuf; | |
1657 | int osize = db->db.db_size; | |
1658 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
1659 | dnode_t *dn; | |
1660 | ||
1661 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
1662 | ||
1663 | DB_DNODE_ENTER(db); | |
1664 | dn = DB_DNODE(db); | |
1665 | ||
1666 | /* XXX does *this* func really need the lock? */ | |
1667 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
1668 | ||
1669 | /* | |
1670 | * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held | |
1671 | * is OK, because there can be no other references to the db | |
1672 | * when we are changing its size, so no concurrent DB_FILL can | |
1673 | * be happening. | |
1674 | */ | |
1675 | /* | |
1676 | * XXX we should be doing a dbuf_read, checking the return | |
1677 | * value and returning that up to our callers | |
1678 | */ | |
1679 | dmu_buf_will_dirty(&db->db, tx); | |
1680 | ||
1681 | /* create the data buffer for the new block */ | |
1682 | buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); | |
1683 | ||
1684 | /* copy old block data to the new block */ | |
1685 | obuf = db->db_buf; | |
1686 | bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); | |
1687 | /* zero the remainder */ | |
1688 | if (size > osize) | |
1689 | bzero((uint8_t *)buf->b_data + osize, size - osize); | |
1690 | ||
1691 | mutex_enter(&db->db_mtx); | |
1692 | dbuf_set_data(db, buf); | |
1693 | arc_buf_destroy(obuf, db); | |
1694 | db->db.db_size = size; | |
1695 | ||
1696 | if (db->db_level == 0) { | |
1697 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
1698 | db->db_last_dirty->dt.dl.dr_data = buf; | |
1699 | } | |
1700 | mutex_exit(&db->db_mtx); | |
1701 | ||
1702 | dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); | |
1703 | DB_DNODE_EXIT(db); | |
1704 | } | |
1705 | ||
1706 | void | |
1707 | dbuf_release_bp(dmu_buf_impl_t *db) | |
1708 | { | |
1709 | ASSERTV(objset_t *os = db->db_objset); | |
1710 | ||
1711 | ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); | |
1712 | ASSERT(arc_released(os->os_phys_buf) || | |
1713 | list_link_active(&os->os_dsl_dataset->ds_synced_link)); | |
1714 | ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); | |
1715 | ||
1716 | (void) arc_release(db->db_buf, db); | |
1717 | } | |
1718 | ||
1719 | /* | |
1720 | * We already have a dirty record for this TXG, and we are being | |
1721 | * dirtied again. | |
1722 | */ | |
1723 | static void | |
1724 | dbuf_redirty(dbuf_dirty_record_t *dr) | |
1725 | { | |
1726 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
1727 | ||
1728 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1729 | ||
1730 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { | |
1731 | /* | |
1732 | * If this buffer has already been written out, | |
1733 | * we now need to reset its state. | |
1734 | */ | |
1735 | dbuf_unoverride(dr); | |
1736 | if (db->db.db_object != DMU_META_DNODE_OBJECT && | |
1737 | db->db_state != DB_NOFILL) { | |
1738 | /* Already released on initial dirty, so just thaw. */ | |
1739 | ASSERT(arc_released(db->db_buf)); | |
1740 | arc_buf_thaw(db->db_buf); | |
1741 | } | |
1742 | } | |
1743 | } | |
1744 | ||
1745 | dbuf_dirty_record_t * | |
1746 | dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
1747 | { | |
1748 | dnode_t *dn; | |
1749 | objset_t *os; | |
1750 | dbuf_dirty_record_t **drp, *dr; | |
1751 | int drop_struct_lock = FALSE; | |
1752 | int txgoff = tx->tx_txg & TXG_MASK; | |
1753 | ||
1754 | ASSERT(tx->tx_txg != 0); | |
1755 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
1756 | DMU_TX_DIRTY_BUF(tx, db); | |
1757 | ||
1758 | DB_DNODE_ENTER(db); | |
1759 | dn = DB_DNODE(db); | |
1760 | /* | |
1761 | * Shouldn't dirty a regular buffer in syncing context. Private | |
1762 | * objects may be dirtied in syncing context, but only if they | |
1763 | * were already pre-dirtied in open context. | |
1764 | */ | |
1765 | #ifdef DEBUG | |
1766 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1767 | rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1768 | RW_READER, FTAG); | |
1769 | } | |
1770 | ASSERT(!dmu_tx_is_syncing(tx) || | |
1771 | BP_IS_HOLE(dn->dn_objset->os_rootbp) || | |
1772 | DMU_OBJECT_IS_SPECIAL(dn->dn_object) || | |
1773 | dn->dn_objset->os_dsl_dataset == NULL); | |
1774 | if (dn->dn_objset->os_dsl_dataset != NULL) | |
1775 | rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
1776 | #endif | |
1777 | /* | |
1778 | * We make this assert for private objects as well, but after we | |
1779 | * check if we're already dirty. They are allowed to re-dirty | |
1780 | * in syncing context. | |
1781 | */ | |
1782 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
1783 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
1784 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); | |
1785 | ||
1786 | mutex_enter(&db->db_mtx); | |
1787 | /* | |
1788 | * XXX make this true for indirects too? The problem is that | |
1789 | * transactions created with dmu_tx_create_assigned() from | |
1790 | * syncing context don't bother holding ahead. | |
1791 | */ | |
1792 | ASSERT(db->db_level != 0 || | |
1793 | db->db_state == DB_CACHED || db->db_state == DB_FILL || | |
1794 | db->db_state == DB_NOFILL); | |
1795 | ||
1796 | mutex_enter(&dn->dn_mtx); | |
1797 | /* | |
1798 | * Don't set dirtyctx to SYNC if we're just modifying this as we | |
1799 | * initialize the objset. | |
1800 | */ | |
1801 | if (dn->dn_dirtyctx == DN_UNDIRTIED) { | |
1802 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1803 | rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1804 | RW_READER, FTAG); | |
1805 | } | |
1806 | if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { | |
1807 | dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? | |
1808 | DN_DIRTY_SYNC : DN_DIRTY_OPEN); | |
1809 | ASSERT(dn->dn_dirtyctx_firstset == NULL); | |
1810 | dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); | |
1811 | } | |
1812 | if (dn->dn_objset->os_dsl_dataset != NULL) { | |
1813 | rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
1814 | FTAG); | |
1815 | } | |
1816 | } | |
1817 | mutex_exit(&dn->dn_mtx); | |
1818 | ||
1819 | if (db->db_blkid == DMU_SPILL_BLKID) | |
1820 | dn->dn_have_spill = B_TRUE; | |
1821 | ||
1822 | /* | |
1823 | * If this buffer is already dirty, we're done. | |
1824 | */ | |
1825 | drp = &db->db_last_dirty; | |
1826 | ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || | |
1827 | db->db.db_object == DMU_META_DNODE_OBJECT); | |
1828 | while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) | |
1829 | drp = &dr->dr_next; | |
1830 | if (dr && dr->dr_txg == tx->tx_txg) { | |
1831 | DB_DNODE_EXIT(db); | |
1832 | ||
1833 | dbuf_redirty(dr); | |
1834 | mutex_exit(&db->db_mtx); | |
1835 | return (dr); | |
1836 | } | |
1837 | ||
1838 | /* | |
1839 | * Only valid if not already dirty. | |
1840 | */ | |
1841 | ASSERT(dn->dn_object == 0 || | |
1842 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
1843 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); | |
1844 | ||
1845 | ASSERT3U(dn->dn_nlevels, >, db->db_level); | |
1846 | ||
1847 | /* | |
1848 | * We should only be dirtying in syncing context if it's the | |
1849 | * mos or we're initializing the os or it's a special object. | |
1850 | * However, we are allowed to dirty in syncing context provided | |
1851 | * we already dirtied it in open context. Hence we must make | |
1852 | * this assertion only if we're not already dirty. | |
1853 | */ | |
1854 | os = dn->dn_objset; | |
1855 | VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); | |
1856 | #ifdef DEBUG | |
1857 | if (dn->dn_objset->os_dsl_dataset != NULL) | |
1858 | rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); | |
1859 | ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || | |
1860 | os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); | |
1861 | if (dn->dn_objset->os_dsl_dataset != NULL) | |
1862 | rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
1863 | #endif | |
1864 | ASSERT(db->db.db_size != 0); | |
1865 | ||
1866 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); | |
1867 | ||
1868 | if (db->db_blkid != DMU_BONUS_BLKID) { | |
1869 | dmu_objset_willuse_space(os, db->db.db_size, tx); | |
1870 | } | |
1871 | ||
1872 | /* | |
1873 | * If this buffer is dirty in an old transaction group we need | |
1874 | * to make a copy of it so that the changes we make in this | |
1875 | * transaction group won't leak out when we sync the older txg. | |
1876 | */ | |
1877 | dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); | |
1878 | list_link_init(&dr->dr_dirty_node); | |
1879 | if (db->db_level == 0) { | |
1880 | void *data_old = db->db_buf; | |
1881 | ||
1882 | if (db->db_state != DB_NOFILL) { | |
1883 | if (db->db_blkid == DMU_BONUS_BLKID) { | |
1884 | dbuf_fix_old_data(db, tx->tx_txg); | |
1885 | data_old = db->db.db_data; | |
1886 | } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { | |
1887 | /* | |
1888 | * Release the data buffer from the cache so | |
1889 | * that we can modify it without impacting | |
1890 | * possible other users of this cached data | |
1891 | * block. Note that indirect blocks and | |
1892 | * private objects are not released until the | |
1893 | * syncing state (since they are only modified | |
1894 | * then). | |
1895 | */ | |
1896 | arc_release(db->db_buf, db); | |
1897 | dbuf_fix_old_data(db, tx->tx_txg); | |
1898 | data_old = db->db_buf; | |
1899 | } | |
1900 | ASSERT(data_old != NULL); | |
1901 | } | |
1902 | dr->dt.dl.dr_data = data_old; | |
1903 | } else { | |
1904 | mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); | |
1905 | list_create(&dr->dt.di.dr_children, | |
1906 | sizeof (dbuf_dirty_record_t), | |
1907 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
1908 | } | |
1909 | if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) | |
1910 | dr->dr_accounted = db->db.db_size; | |
1911 | dr->dr_dbuf = db; | |
1912 | dr->dr_txg = tx->tx_txg; | |
1913 | dr->dr_next = *drp; | |
1914 | *drp = dr; | |
1915 | ||
1916 | /* | |
1917 | * We could have been freed_in_flight between the dbuf_noread | |
1918 | * and dbuf_dirty. We win, as though the dbuf_noread() had | |
1919 | * happened after the free. | |
1920 | */ | |
1921 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && | |
1922 | db->db_blkid != DMU_SPILL_BLKID) { | |
1923 | mutex_enter(&dn->dn_mtx); | |
1924 | if (dn->dn_free_ranges[txgoff] != NULL) { | |
1925 | range_tree_clear(dn->dn_free_ranges[txgoff], | |
1926 | db->db_blkid, 1); | |
1927 | } | |
1928 | mutex_exit(&dn->dn_mtx); | |
1929 | db->db_freed_in_flight = FALSE; | |
1930 | } | |
1931 | ||
1932 | /* | |
1933 | * This buffer is now part of this txg | |
1934 | */ | |
1935 | dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); | |
1936 | db->db_dirtycnt += 1; | |
1937 | ASSERT3U(db->db_dirtycnt, <=, 3); | |
1938 | ||
1939 | mutex_exit(&db->db_mtx); | |
1940 | ||
1941 | if (db->db_blkid == DMU_BONUS_BLKID || | |
1942 | db->db_blkid == DMU_SPILL_BLKID) { | |
1943 | mutex_enter(&dn->dn_mtx); | |
1944 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
1945 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
1946 | mutex_exit(&dn->dn_mtx); | |
1947 | dnode_setdirty(dn, tx); | |
1948 | DB_DNODE_EXIT(db); | |
1949 | return (dr); | |
1950 | } | |
1951 | ||
1952 | /* | |
1953 | * The dn_struct_rwlock prevents db_blkptr from changing | |
1954 | * due to a write from syncing context completing | |
1955 | * while we are running, so we want to acquire it before | |
1956 | * looking at db_blkptr. | |
1957 | */ | |
1958 | if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { | |
1959 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
1960 | drop_struct_lock = TRUE; | |
1961 | } | |
1962 | ||
1963 | /* | |
1964 | * We need to hold the dn_struct_rwlock to make this assertion, | |
1965 | * because it protects dn_phys / dn_next_nlevels from changing. | |
1966 | */ | |
1967 | ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || | |
1968 | dn->dn_phys->dn_nlevels > db->db_level || | |
1969 | dn->dn_next_nlevels[txgoff] > db->db_level || | |
1970 | dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || | |
1971 | dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); | |
1972 | ||
1973 | /* | |
1974 | * If we are overwriting a dedup BP, then unless it is snapshotted, | |
1975 | * when we get to syncing context we will need to decrement its | |
1976 | * refcount in the DDT. Prefetch the relevant DDT block so that | |
1977 | * syncing context won't have to wait for the i/o. | |
1978 | */ | |
1979 | ddt_prefetch(os->os_spa, db->db_blkptr); | |
1980 | ||
1981 | if (db->db_level == 0) { | |
1982 | dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); | |
1983 | ASSERT(dn->dn_maxblkid >= db->db_blkid); | |
1984 | } | |
1985 | ||
1986 | if (db->db_level+1 < dn->dn_nlevels) { | |
1987 | dmu_buf_impl_t *parent = db->db_parent; | |
1988 | dbuf_dirty_record_t *di; | |
1989 | int parent_held = FALSE; | |
1990 | ||
1991 | if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { | |
1992 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1993 | ||
1994 | parent = dbuf_hold_level(dn, db->db_level+1, | |
1995 | db->db_blkid >> epbs, FTAG); | |
1996 | ASSERT(parent != NULL); | |
1997 | parent_held = TRUE; | |
1998 | } | |
1999 | if (drop_struct_lock) | |
2000 | rw_exit(&dn->dn_struct_rwlock); | |
2001 | ASSERT3U(db->db_level+1, ==, parent->db_level); | |
2002 | di = dbuf_dirty(parent, tx); | |
2003 | if (parent_held) | |
2004 | dbuf_rele(parent, FTAG); | |
2005 | ||
2006 | mutex_enter(&db->db_mtx); | |
2007 | /* | |
2008 | * Since we've dropped the mutex, it's possible that | |
2009 | * dbuf_undirty() might have changed this out from under us. | |
2010 | */ | |
2011 | if (db->db_last_dirty == dr || | |
2012 | dn->dn_object == DMU_META_DNODE_OBJECT) { | |
2013 | mutex_enter(&di->dt.di.dr_mtx); | |
2014 | ASSERT3U(di->dr_txg, ==, tx->tx_txg); | |
2015 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
2016 | list_insert_tail(&di->dt.di.dr_children, dr); | |
2017 | mutex_exit(&di->dt.di.dr_mtx); | |
2018 | dr->dr_parent = di; | |
2019 | } | |
2020 | mutex_exit(&db->db_mtx); | |
2021 | } else { | |
2022 | ASSERT(db->db_level+1 == dn->dn_nlevels); | |
2023 | ASSERT(db->db_blkid < dn->dn_nblkptr); | |
2024 | ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); | |
2025 | mutex_enter(&dn->dn_mtx); | |
2026 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
2027 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
2028 | mutex_exit(&dn->dn_mtx); | |
2029 | if (drop_struct_lock) | |
2030 | rw_exit(&dn->dn_struct_rwlock); | |
2031 | } | |
2032 | ||
2033 | dnode_setdirty(dn, tx); | |
2034 | DB_DNODE_EXIT(db); | |
2035 | return (dr); | |
2036 | } | |
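
/*
 * A hedged sketch of how the dirty path above is normally driven through
 * the public DMU transaction API.  The helper name and the
 * object/offset/length values are illustrative, not part of this file;
 * dmu_write() ends up dirtying the covered dbufs via dbuf_dirty().
 */
static int
example_dirty_block(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *data)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, object, off, len, data, tx);
	dmu_tx_commit(tx);
	return (0);
}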
2037 | ||
2038 | /* | |
2039 | * Undirty a buffer in the transaction group referenced by the given | |
2040 | * transaction. Return whether this evicted the dbuf. | |
2041 | */ | |
2042 | static boolean_t | |
2043 | dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
2044 | { | |
2045 | dnode_t *dn; | |
2046 | uint64_t txg = tx->tx_txg; | |
2047 | dbuf_dirty_record_t *dr, **drp; | |
2048 | ||
2049 | ASSERT(txg != 0); | |
2050 | ||
2051 | /* | |
2052 | * Due to our use of dn_nlevels below, this can only be called | |
2053 | * in open context, unless we are operating on the MOS. | |
2054 | * From syncing context, dn_nlevels may be different from the | |
2055 | * dn_nlevels used when dbuf was dirtied. | |
2056 | */ | |
2057 | ASSERT(db->db_objset == | |
2058 | dmu_objset_pool(db->db_objset)->dp_meta_objset || | |
2059 | txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); | |
2060 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2061 | ASSERT0(db->db_level); | |
2062 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
2063 | ||
2064 | /* | |
2065 | * If this buffer is not dirty, we're done. | |
2066 | */ | |
2067 | for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) | |
2068 | if (dr->dr_txg <= txg) | |
2069 | break; | |
2070 | if (dr == NULL || dr->dr_txg < txg) | |
2071 | return (B_FALSE); | |
2072 | ASSERT(dr->dr_txg == txg); | |
2073 | ASSERT(dr->dr_dbuf == db); | |
2074 | ||
2075 | DB_DNODE_ENTER(db); | |
2076 | dn = DB_DNODE(db); | |
2077 | ||
2078 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); | |
2079 | ||
2080 | ASSERT(db->db.db_size != 0); | |
2081 | ||
2082 | dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), | |
2083 | dr->dr_accounted, txg); | |
2084 | ||
2085 | *drp = dr->dr_next; | |
2086 | ||
2087 | /* | |
2088 | * Note that there are three places in dbuf_dirty() | |
2089 | * where this dirty record may be put on a list. | |
2090 | * Make sure to do a list_remove corresponding to | |
2091 | * every one of those list_insert calls. | |
2092 | */ | |
2093 | if (dr->dr_parent) { | |
2094 | mutex_enter(&dr->dr_parent->dt.di.dr_mtx); | |
2095 | list_remove(&dr->dr_parent->dt.di.dr_children, dr); | |
2096 | mutex_exit(&dr->dr_parent->dt.di.dr_mtx); | |
2097 | } else if (db->db_blkid == DMU_SPILL_BLKID || | |
2098 | db->db_level + 1 == dn->dn_nlevels) { | |
2099 | ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); | |
2100 | mutex_enter(&dn->dn_mtx); | |
2101 | list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); | |
2102 | mutex_exit(&dn->dn_mtx); | |
2103 | } | |
2104 | DB_DNODE_EXIT(db); | |
2105 | ||
2106 | if (db->db_state != DB_NOFILL) { | |
2107 | dbuf_unoverride(dr); | |
2108 | ||
2109 | ASSERT(db->db_buf != NULL); | |
2110 | ASSERT(dr->dt.dl.dr_data != NULL); | |
2111 | if (dr->dt.dl.dr_data != db->db_buf) | |
2112 | arc_buf_destroy(dr->dt.dl.dr_data, db); | |
2113 | } | |
2114 | ||
2115 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
2116 | ||
2117 | ASSERT(db->db_dirtycnt > 0); | |
2118 | db->db_dirtycnt -= 1; | |
2119 | ||
2120 | if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { | |
2121 | ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); | |
2122 | dbuf_destroy(db); | |
2123 | return (B_TRUE); | |
2124 | } | |
2125 | ||
2126 | return (B_FALSE); | |
2127 | } | |
2128 | ||
2129 | static void | |
2130 | dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) | |
2131 | { | |
2132 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2133 | ||
2134 | ASSERT(tx->tx_txg != 0); | |
2135 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
2136 | ||
2137 | /* | |
2138 | * Quick check for dirtiness. For already-dirty blocks, this | |
2139 | * reduces the runtime of this function by >90% and improves overall | |
2140 | * performance by 50% for some workloads (e.g. file deletion with | |
2141 | * indirect blocks cached). | |
2142 | */ | |
2143 | mutex_enter(&db->db_mtx); | |
2144 | ||
2145 | dbuf_dirty_record_t *dr; | |
2146 | for (dr = db->db_last_dirty; | |
2147 | dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { | |
2148 | /* | |
2149 | * It's possible that it is already dirty but not cached, | |
2150 | * because there are some calls to dbuf_dirty() that don't | |
2151 | * go through dmu_buf_will_dirty(). | |
2152 | */ | |
2153 | if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { | |
2154 | /* This dbuf is already dirty and cached. */ | |
2155 | dbuf_redirty(dr); | |
2156 | mutex_exit(&db->db_mtx); | |
2157 | return; | |
2158 | } | |
2159 | } | |
2160 | mutex_exit(&db->db_mtx); | |
2161 | ||
2162 | DB_DNODE_ENTER(db); | |
2163 | if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) | |
2164 | flags |= DB_RF_HAVESTRUCT; | |
2165 | DB_DNODE_EXIT(db); | |
2166 | (void) dbuf_read(db, NULL, flags); | |
2167 | (void) dbuf_dirty(db, tx); | |
2168 | } | |
2169 | ||
2170 | void | |
2171 | dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2172 | { | |
2173 | dmu_buf_will_dirty_impl(db_fake, | |
2174 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); | |
2175 | } | |
2176 | ||
2177 | void | |
2178 | dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2179 | { | |
2180 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2181 | ||
2182 | db->db_state = DB_NOFILL; | |
2183 | ||
2184 | dmu_buf_will_fill(db_fake, tx); | |
2185 | } | |
2186 | ||
2187 | void | |
2188 | dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2189 | { | |
2190 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2191 | ||
2192 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2193 | ASSERT(tx->tx_txg != 0); | |
2194 | ASSERT(db->db_level == 0); | |
2195 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
2196 | ||
2197 | ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || | |
2198 | dmu_tx_private_ok(tx)); | |
2199 | ||
2200 | dbuf_noread(db); | |
2201 | (void) dbuf_dirty(db, tx); | |
2202 | } | |
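
/*
 * Sketch of the fill protocol: a caller that intends to overwrite an
 * entire buffer announces that with dmu_buf_will_fill() (so no read i/o
 * is issued), copies the data in, and then signals completion with
 * dmu_buf_fill_done().  Hypothetical helper, shown only to make the
 * pairing explicit.
 */
static void
example_fill(dmu_buf_t *db, const void *data, dmu_tx_t *tx)
{
	dmu_buf_will_fill(db, tx);
	bcopy(data, db->db_data, db->db_size);
	dmu_buf_fill_done(db, tx);
}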
2203 | ||
2204 | /* | |
2205 | * This function is effectively the same as dmu_buf_will_dirty(), but | |
2206 | * indicates the caller expects raw encrypted data in the db. It will | |
2207 | * also set the raw flag on the created dirty record. | |
2208 | */ | |
2209 | void | |
2210 | dmu_buf_will_change_crypt_params(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2211 | { | |
2212 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2213 | dbuf_dirty_record_t *dr; | |
2214 | ||
2215 | dmu_buf_will_dirty_impl(db_fake, | |
2216 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); | |
2217 | ||
2218 | dr = db->db_last_dirty; | |
2219 | while (dr != NULL && dr->dr_txg > tx->tx_txg) | |
2220 | dr = dr->dr_next; | |
2221 | ||
2222 | ASSERT3P(dr, !=, NULL); | |
2223 | ASSERT3U(dr->dr_txg, ==, tx->tx_txg); | |
2224 | dr->dt.dl.dr_raw = B_TRUE; | |
2225 | db->db_objset->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; | |
2226 | } | |
2227 | ||
2228 | #pragma weak dmu_buf_fill_done = dbuf_fill_done | |
2229 | /* ARGSUSED */ | |
2230 | void | |
2231 | dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
2232 | { | |
2233 | mutex_enter(&db->db_mtx); | |
2234 | DBUF_VERIFY(db); | |
2235 | ||
2236 | if (db->db_state == DB_FILL) { | |
2237 | if (db->db_level == 0 && db->db_freed_in_flight) { | |
2238 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2239 | /* we were freed while filling */ | |
2240 | /* XXX dbuf_undirty? */ | |
2241 | bzero(db->db.db_data, db->db.db_size); | |
2242 | db->db_freed_in_flight = FALSE; | |
2243 | } | |
2244 | db->db_state = DB_CACHED; | |
2245 | cv_broadcast(&db->db_changed); | |
2246 | } | |
2247 | mutex_exit(&db->db_mtx); | |
2248 | } | |
2249 | ||
2250 | void | |
2251 | dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, | |
2252 | bp_embedded_type_t etype, enum zio_compress comp, | |
2253 | int uncompressed_size, int compressed_size, int byteorder, | |
2254 | dmu_tx_t *tx) | |
2255 | { | |
2256 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
2257 | struct dirty_leaf *dl; | |
2258 | dmu_object_type_t type; | |
2259 | ||
2260 | if (etype == BP_EMBEDDED_TYPE_DATA) { | |
2261 | ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), | |
2262 | SPA_FEATURE_EMBEDDED_DATA)); | |
2263 | } | |
2264 | ||
2265 | DB_DNODE_ENTER(db); | |
2266 | type = DB_DNODE(db)->dn_type; | |
2267 | DB_DNODE_EXIT(db); | |
2268 | ||
2269 | ASSERT0(db->db_level); | |
2270 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2271 | ||
2272 | dmu_buf_will_not_fill(dbuf, tx); | |
2273 | ||
2274 | ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); | |
2275 | dl = &db->db_last_dirty->dt.dl; | |
2276 | encode_embedded_bp_compressed(&dl->dr_overridden_by, | |
2277 | data, comp, uncompressed_size, compressed_size); | |
2278 | BPE_SET_ETYPE(&dl->dr_overridden_by, etype); | |
2279 | BP_SET_TYPE(&dl->dr_overridden_by, type); | |
2280 | BP_SET_LEVEL(&dl->dr_overridden_by, 0); | |
2281 | BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); | |
2282 | ||
2283 | dl->dr_override_state = DR_OVERRIDDEN; | |
2284 | dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; | |
2285 | } | |
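
/*
 * Sketch of how dmu_buf_write_embedded() is invoked, modeled loosely on
 * the "zfs receive" WRITE_EMBEDDED path.  The helper and its argument
 * values are illustrative: the caller supplies an already-compressed
 * payload plus its logical and physical sizes, and the data is encoded
 * directly into the block pointer rather than written to disk.
 */
static void
example_write_embedded(dmu_buf_t *db, void *payload, int lsize, int psize,
    int byteorder, dmu_tx_t *tx)
{
	dmu_buf_write_embedded(db, payload, BP_EMBEDDED_TYPE_DATA,
	    ZIO_COMPRESS_LZ4, lsize, psize, byteorder, tx);
}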
2286 | ||
2287 | /* | |
2288 | * Directly assign a provided arc buf to a given dbuf if it's not referenced | |
2289 | * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. | |
2290 | */ | |
2291 | void | |
2292 | dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) | |
2293 | { | |
2294 | ASSERT(!refcount_is_zero(&db->db_holds)); | |
2295 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2296 | ASSERT(db->db_level == 0); | |
2297 | ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); | |
2298 | ASSERT(buf != NULL); | |
2299 | ASSERT(arc_buf_lsize(buf) == db->db.db_size); | |
2300 | ASSERT(tx->tx_txg != 0); | |
2301 | ||
2302 | arc_return_buf(buf, db); | |
2303 | ASSERT(arc_released(buf)); | |
2304 | ||
2305 | mutex_enter(&db->db_mtx); | |
2306 | ||
2307 | while (db->db_state == DB_READ || db->db_state == DB_FILL) | |
2308 | cv_wait(&db->db_changed, &db->db_mtx); | |
2309 | ||
2310 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); | |
2311 | ||
2312 | if (db->db_state == DB_CACHED && | |
2313 | refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { | |
2314 | /* | |
2315 | * In practice, we will never have a case where we have an | |
2316 | * encrypted arc buffer while additional holds exist on the | |
2317 | * dbuf. We don't handle this here so we simply assert that | |
2318 | * fact instead. | |
2319 | */ | |
2320 | ASSERT(!arc_is_encrypted(buf)); | |
2321 | mutex_exit(&db->db_mtx); | |
2322 | (void) dbuf_dirty(db, tx); | |
2323 | bcopy(buf->b_data, db->db.db_data, db->db.db_size); | |
2324 | arc_buf_destroy(buf, db); | |
2325 | xuio_stat_wbuf_copied(); | |
2326 | return; | |
2327 | } | |
2328 | ||
2329 | xuio_stat_wbuf_nocopy(); | |
2330 | if (db->db_state == DB_CACHED) { | |
2331 | dbuf_dirty_record_t *dr = db->db_last_dirty; | |
2332 | ||
2333 | ASSERT(db->db_buf != NULL); | |
2334 | if (dr != NULL && dr->dr_txg == tx->tx_txg) { | |
2335 | ASSERT(dr->dt.dl.dr_data == db->db_buf); | |
2336 | IMPLY(arc_is_encrypted(buf), dr->dt.dl.dr_raw); | |
2337 | ||
2338 | if (!arc_released(db->db_buf)) { | |
2339 | ASSERT(dr->dt.dl.dr_override_state == | |
2340 | DR_OVERRIDDEN); | |
2341 | arc_release(db->db_buf, db); | |
2342 | } | |
2343 | dr->dt.dl.dr_data = buf; | |
2344 | arc_buf_destroy(db->db_buf, db); | |
2345 | } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { | |
2346 | arc_release(db->db_buf, db); | |
2347 | arc_buf_destroy(db->db_buf, db); | |
2348 | } | |
2349 | db->db_buf = NULL; | |
2350 | } | |
2351 | ASSERT(db->db_buf == NULL); | |
2352 | dbuf_set_data(db, buf); | |
2353 | db->db_state = DB_FILL; | |
2354 | mutex_exit(&db->db_mtx); | |
2355 | (void) dbuf_dirty(db, tx); | |
2356 | dmu_buf_fill_done(&db->db, tx); | |
2357 | } | |
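
/*
 * Sketch of the zero-copy write path that lands in dbuf_assign_arcbuf(),
 * assuming the dmu_request_arcbuf() / dmu_assign_arcbuf() interfaces
 * exported by dmu.c: the caller borrows a loaned arc buf sized like the
 * target block, fills it, and hands it back instead of copying through
 * dmu_write().  Hypothetical helper; "db" may be any held dbuf of the
 * target object (typically the bonus buffer).
 */
static void
example_assign_arcbuf(dmu_buf_t *db, uint64_t offset, int blksz,
    const void *data, dmu_tx_t *tx)
{
	arc_buf_t *buf = dmu_request_arcbuf(db, blksz);

	bcopy(data, buf->b_data, blksz);
	dmu_assign_arcbuf(db, offset, buf, tx);
}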
2358 | ||
2359 | void | |
2360 | dbuf_destroy(dmu_buf_impl_t *db) | |
2361 | { | |
2362 | dnode_t *dn; | |
2363 | dmu_buf_impl_t *parent = db->db_parent; | |
2364 | dmu_buf_impl_t *dndb; | |
2365 | ||
2366 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
2367 | ASSERT(refcount_is_zero(&db->db_holds)); | |
2368 | ||
2369 | if (db->db_buf != NULL) { | |
2370 | arc_buf_destroy(db->db_buf, db); | |
2371 | db->db_buf = NULL; | |
2372 | } | |
2373 | ||
2374 | if (db->db_blkid == DMU_BONUS_BLKID) { | |
2375 | int slots = DB_DNODE(db)->dn_num_slots; | |
2376 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
2377 | if (db->db.db_data != NULL) { | |
2378 | kmem_free(db->db.db_data, bonuslen); | |
2379 | arc_space_return(bonuslen, ARC_SPACE_BONUS); | |
2380 | db->db_state = DB_UNCACHED; | |
2381 | } | |
2382 | } | |
2383 | ||
2384 | dbuf_clear_data(db); | |
2385 | ||
2386 | if (multilist_link_active(&db->db_cache_link)) { | |
2387 | multilist_remove(dbuf_cache, db); | |
2388 | (void) refcount_remove_many(&dbuf_cache_size, | |
2389 | db->db.db_size, db); | |
2390 | DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); | |
2391 | DBUF_STAT_BUMPDOWN(cache_count); | |
2392 | DBUF_STAT_DECR(cache_levels_bytes[db->db_level], | |
2393 | db->db.db_size); | |
2394 | } | |
2395 | ||
2396 | ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); | |
2397 | ASSERT(db->db_data_pending == NULL); | |
2398 | ||
2399 | db->db_state = DB_EVICTING; | |
2400 | db->db_blkptr = NULL; | |
2401 | ||
2402 | /* | |
2403 | * Now that db_state is DB_EVICTING, nobody else can find this via | |
2404 | * the hash table. We can now drop db_mtx, which allows us to | |
2405 | * acquire the dn_dbufs_mtx. | |
2406 | */ | |
2407 | mutex_exit(&db->db_mtx); | |
2408 | ||
2409 | DB_DNODE_ENTER(db); | |
2410 | dn = DB_DNODE(db); | |
2411 | dndb = dn->dn_dbuf; | |
2412 | if (db->db_blkid != DMU_BONUS_BLKID) { | |
2413 | boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); | |
2414 | if (needlock) | |
2415 | mutex_enter(&dn->dn_dbufs_mtx); | |
2416 | avl_remove(&dn->dn_dbufs, db); | |
2417 | atomic_dec_32(&dn->dn_dbufs_count); | |
2418 | membar_producer(); | |
2419 | DB_DNODE_EXIT(db); | |
2420 | if (needlock) | |
2421 | mutex_exit(&dn->dn_dbufs_mtx); | |
2422 | /* | |
2423 | * Decrementing the dbuf count means that the hold corresponding | |
2424 | * to the removed dbuf is no longer discounted in dnode_move(), | |
2425 | * so the dnode cannot be moved until after we release the hold. | |
2426 | * The membar_producer() ensures visibility of the decremented | |
2427 | * value in dnode_move(), since DB_DNODE_EXIT doesn't actually | |
2428 | * release any lock. | |
2429 | */ | |
2430 | dnode_rele(dn, db); | |
2431 | db->db_dnode_handle = NULL; | |
2432 | ||
2433 | dbuf_hash_remove(db); | |
2434 | } else { | |
2435 | DB_DNODE_EXIT(db); | |
2436 | } | |
2437 | ||
2438 | ASSERT(refcount_is_zero(&db->db_holds)); | |
2439 | ||
2440 | db->db_parent = NULL; | |
2441 | ||
2442 | ASSERT(db->db_buf == NULL); | |
2443 | ASSERT(db->db.db_data == NULL); | |
2444 | ASSERT(db->db_hash_next == NULL); | |
2445 | ASSERT(db->db_blkptr == NULL); | |
2446 | ASSERT(db->db_data_pending == NULL); | |
2447 | ASSERT(!multilist_link_active(&db->db_cache_link)); | |
2448 | ||
2449 | kmem_cache_free(dbuf_kmem_cache, db); | |
2450 | arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
2451 | ||
2452 | /* | |
2453 | * If this dbuf is referenced from an indirect dbuf, | |
2454 | * decrement the ref count on the indirect dbuf. | |
2455 | */ | |
2456 | if (parent && parent != dndb) | |
2457 | dbuf_rele(parent, db); | |
2458 | } | |
2459 | ||
2460 | /* | |
2461 | * Note: While bpp will always be updated if the function returns success, | |
2462 | * parentp will not be updated if the dnode does not have dn_dbuf filled in; | |
2463 | * this happens when the dnode is the meta-dnode, or {user|group|project}used | |
2464 | * object. | |
2465 | */ | |
2466 | __attribute__((always_inline)) | |
2467 | static inline int | |
2468 | dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, | |
2469 | dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh) | |
2470 | { | |
2471 | *parentp = NULL; | |
2472 | *bpp = NULL; | |
2473 | ||
2474 | ASSERT(blkid != DMU_BONUS_BLKID); | |
2475 | ||
2476 | if (blkid == DMU_SPILL_BLKID) { | |
2477 | mutex_enter(&dn->dn_mtx); | |
2478 | if (dn->dn_have_spill && | |
2479 | (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
2480 | *bpp = DN_SPILL_BLKPTR(dn->dn_phys); | |
2481 | else | |
2482 | *bpp = NULL; | |
2483 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2484 | *parentp = dn->dn_dbuf; | |
2485 | mutex_exit(&dn->dn_mtx); | |
2486 | return (0); | |
2487 | } | |
2488 | ||
2489 | int nlevels = | |
2490 | (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; | |
2491 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2492 | ||
2493 | ASSERT3U(level * epbs, <, 64); | |
2494 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
2495 | /* | |
2496 | * This assertion shouldn't trip as long as the max indirect block size | |
2497 | * is less than 1M. The reason for this is that up to that point, | |
2498 | * the number of levels required to address an entire object with blocks | |
2499 | * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In | |
2500 | * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 >= 55 | |
2501 | * (i.e. we can address the entire object), objects will all use at most | |
2502 | * N-1 levels and the assertion won't overflow. However, once epbs is | |
2503 | * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be | |
2504 | * enough to address an entire object, so objects will have 5 levels, | |
2505 | * but then this assertion will overflow. | |
2506 | * | |
2507 | * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we | |
2508 | * need to redo this logic to handle overflows. | |
2509 | */ | |
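	/*
	 * Worked example of the arithmetic above (current on-disk
	 * defaults, shown for illustration): with 128K indirect blocks,
	 * epbs = 17 - SPA_BLKPTRSHIFT (7) = 10, and a maximal object of
	 * 2^64 bytes in SPA_MINBLOCKSIZE (2^9) blocks needs 55 bits of
	 * block address, so 6 levels suffice and 6 * 10 + 1 = 61 <= 64.
	 * With hypothetical 1M indirect blocks, epbs = 20 - 7 = 13;
	 * 4 * 13 + 1 = 53 < 55 means 4 levels cannot address a maximal
	 * object, 5 levels are required, and 5 * 13 + 1 = 66 overflows.
	 */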
2510 | ASSERT(level >= nlevels || | |
2511 | ((nlevels - level - 1) * epbs) + | |
2512 | highbit64(dn->dn_phys->dn_nblkptr) <= 64); | |
2513 | if (level >= nlevels || | |
2514 | blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << | |
2515 | ((nlevels - level - 1) * epbs)) || | |
2516 | (fail_sparse && | |
2517 | blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { | |
2518 | /* the buffer has no parent yet */ | |
2519 | return (SET_ERROR(ENOENT)); | |
2520 | } else if (level < nlevels-1) { | |
2521 | /* this block is referenced from an indirect block */ | |
2522 | int err; | |
2523 | if (dh == NULL) { | |
2524 | err = dbuf_hold_impl(dn, level+1, | |
2525 | blkid >> epbs, fail_sparse, FALSE, NULL, parentp); | |
2526 | } else { | |
2527 | __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, | |
2528 | blkid >> epbs, fail_sparse, FALSE, NULL, | |
2529 | parentp, dh->dh_depth + 1); | |
2530 | err = __dbuf_hold_impl(dh + 1); | |
2531 | } | |
2532 | if (err) | |
2533 | return (err); | |
2534 | err = dbuf_read(*parentp, NULL, | |
2535 | (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); | |
2536 | if (err) { | |
2537 | dbuf_rele(*parentp, NULL); | |
2538 | *parentp = NULL; | |
2539 | return (err); | |
2540 | } | |
2541 | *bpp = ((blkptr_t *)(*parentp)->db.db_data) + | |
2542 | (blkid & ((1ULL << epbs) - 1)); | |
2543 | if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) | |
2544 | ASSERT(BP_IS_HOLE(*bpp)); | |
2545 | return (0); | |
2546 | } else { | |
2547 | /* the block is referenced from the dnode */ | |
2548 | ASSERT3U(level, ==, nlevels-1); | |
2549 | ASSERT(dn->dn_phys->dn_nblkptr == 0 || | |
2550 | blkid < dn->dn_phys->dn_nblkptr); | |
2551 | if (dn->dn_dbuf) { | |
2552 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
2553 | *parentp = dn->dn_dbuf; | |
2554 | } | |
2555 | *bpp = &dn->dn_phys->dn_blkptr[blkid]; | |
2556 | return (0); | |
2557 | } | |
2558 | } | |
2559 | ||
2560 | static dmu_buf_impl_t * | |
2561 | dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, | |
2562 | dmu_buf_impl_t *parent, blkptr_t *blkptr) | |
2563 | { | |
2564 | objset_t *os = dn->dn_objset; | |
2565 | dmu_buf_impl_t *db, *odb; | |
2566 | ||
2567 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
2568 | ASSERT(dn->dn_type != DMU_OT_NONE); | |
2569 | ||
2570 | db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); | |
2571 | ||
2572 | db->db_objset = os; | |
2573 | db->db.db_object = dn->dn_object; | |
2574 | db->db_level = level; | |
2575 | db->db_blkid = blkid; | |
2576 | db->db_last_dirty = NULL; | |
2577 | db->db_dirtycnt = 0; | |
2578 | db->db_dnode_handle = dn->dn_handle; | |
2579 | db->db_parent = parent; | |
2580 | db->db_blkptr = blkptr; | |
2581 | ||
2582 | db->db_user = NULL; | |
2583 | db->db_user_immediate_evict = FALSE; | |
2584 | db->db_freed_in_flight = FALSE; | |
2585 | db->db_pending_evict = FALSE; | |
2586 | ||
2587 | if (blkid == DMU_BONUS_BLKID) { | |
2588 | ASSERT3P(parent, ==, dn->dn_dbuf); | |
2589 | db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - | |
2590 | (dn->dn_nblkptr-1) * sizeof (blkptr_t); | |
2591 | ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); | |
2592 | db->db.db_offset = DMU_BONUS_BLKID; | |
2593 | db->db_state = DB_UNCACHED; | |
2594 | /* the bonus dbuf is not placed in the hash table */ | |
2595 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
2596 | return (db); | |
2597 | } else if (blkid == DMU_SPILL_BLKID) { | |
2598 | db->db.db_size = (blkptr != NULL) ? | |
2599 | BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; | |
2600 | db->db.db_offset = 0; | |
2601 | } else { | |
2602 | int blocksize = | |
2603 | db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; | |
2604 | db->db.db_size = blocksize; | |
2605 | db->db.db_offset = db->db_blkid * blocksize; | |
2606 | } | |
2607 | ||
2608 | /* | |
2609 | * Hold the dn_dbufs_mtx while we add the new dbuf | |
2610 | * to the hash table *and* the dbufs list. | |
2611 | * This prevents a possible deadlock with someone | |
2612 | * trying to look up this dbuf before it's added to the | |
2613 | * dn_dbufs list. | |
2614 | */ | |
2615 | mutex_enter(&dn->dn_dbufs_mtx); | |
2616 | db->db_state = DB_EVICTING; | |
2617 | if ((odb = dbuf_hash_insert(db)) != NULL) { | |
2618 | /* someone else inserted it first */ | |
2619 | kmem_cache_free(dbuf_kmem_cache, db); | |
2620 | mutex_exit(&dn->dn_dbufs_mtx); | |
2621 | DBUF_STAT_BUMP(hash_insert_race); | |
2622 | return (odb); | |
2623 | } | |
2624 | avl_add(&dn->dn_dbufs, db); | |
2625 | ||
2626 | db->db_state = DB_UNCACHED; | |
2627 | mutex_exit(&dn->dn_dbufs_mtx); | |
2628 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
2629 | ||
2630 | if (parent && parent != dn->dn_dbuf) | |
2631 | dbuf_add_ref(parent, db); | |
2632 | ||
2633 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
2634 | refcount_count(&dn->dn_holds) > 0); | |
2635 | (void) refcount_add(&dn->dn_holds, db); | |
2636 | atomic_inc_32(&dn->dn_dbufs_count); | |
2637 | ||
2638 | dprintf_dbuf(db, "db=%p\n", db); | |
2639 | ||
2640 | return (db); | |
2641 | } | |
2642 | ||
2643 | typedef struct dbuf_prefetch_arg { | |
2644 | spa_t *dpa_spa; /* The spa to issue the prefetch in. */ | |
2645 | zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ | |
2646 | int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ | |
2647 | int dpa_curlevel; /* The current level that we're reading */ | |
2648 | dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ | |
2649 | zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ | |
2650 | zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ | |
2651 | arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ | |
2652 | } dbuf_prefetch_arg_t; | |
2653 | ||
2654 | /* | |
2655 | * Actually issue the prefetch read for the block given. | |
2656 | */ | |
2657 | static void | |
2658 | dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) | |
2659 | { | |
2660 | if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) | |
2661 | return; | |
2662 | ||
2663 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; | |
2664 | arc_flags_t aflags = | |
2665 | dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; | |
2666 | ||
2667 | /* dnodes are always read as raw and then converted later */ | |
2668 | if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && | |
2669 | dpa->dpa_curlevel == 0) | |
2670 | zio_flags |= ZIO_FLAG_RAW; | |
2671 | ||
2672 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2673 | ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); | |
2674 | ASSERT(dpa->dpa_zio != NULL); | |
2675 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, | |
2676 | dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); | |
2677 | } | |
2678 | ||
2679 | /* | |
2680 | * Called when an indirect block above our prefetch target is read in. This | |
2681 | * will either read in the next indirect block down the tree or issue the actual | |
2682 | * prefetch if the next block down is our target. | |
2683 | */ | |
2684 | static void | |
2685 | dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, | |
2686 | const blkptr_t *iobp, arc_buf_t *abuf, void *private) | |
2687 | { | |
2688 | dbuf_prefetch_arg_t *dpa = private; | |
2689 | ||
2690 | ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); | |
2691 | ASSERT3S(dpa->dpa_curlevel, >, 0); | |
2692 | ||
2693 | /* | |
2694 | * The dpa_dnode is only valid if we are called with a NULL | |
2695 | * zio. This indicates that the arc_read() returned without | |
2696 | * first calling zio_read() to issue a physical read. Once | |
2697 | * a physical read is made the dpa_dnode must be invalidated | |
2698 | * as the locks guarding it may have been dropped. If the | |
2699 | * dpa_dnode is still valid, then we want to add it to the dbuf | |
2700 | * cache. To do so, we must hold the dbuf associated with the block | |
2701 | * we just prefetched, read its contents so that we associate it | |
2702 | * with an arc_buf_t, and then release it. | |
2703 | */ | |
2704 | if (zio != NULL) { | |
2705 | ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); | |
2706 | if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { | |
2707 | ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); | |
2708 | } else { | |
2709 | ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); | |
2710 | } | |
2711 | ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); | |
2712 | ||
2713 | dpa->dpa_dnode = NULL; | |
2714 | } else if (dpa->dpa_dnode != NULL) { | |
2715 | uint64_t curblkid = dpa->dpa_zb.zb_blkid >> | |
2716 | (dpa->dpa_epbs * (dpa->dpa_curlevel - | |
2717 | dpa->dpa_zb.zb_level)); | |
2718 | dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, | |
2719 | dpa->dpa_curlevel, curblkid, FTAG); | |
2720 | (void) dbuf_read(db, NULL, | |
2721 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); | |
2722 | dbuf_rele(db, FTAG); | |
2723 | } | |
2724 | ||
2725 | if (abuf == NULL) { | |
2726 | kmem_free(dpa, sizeof (*dpa)); | |
2727 | return; | |
2728 | } | |
2729 | ||
2730 | dpa->dpa_curlevel--; | |
2731 | uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> | |
2732 | (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); | |
2733 | blkptr_t *bp = ((blkptr_t *)abuf->b_data) + | |
2734 | P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); | |
2735 | ||
2736 | if (BP_IS_HOLE(bp)) { | |
2737 | kmem_free(dpa, sizeof (*dpa)); | |
2738 | } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { | |
2739 | ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); | |
2740 | dbuf_issue_final_prefetch(dpa, bp); | |
2741 | kmem_free(dpa, sizeof (*dpa)); | |
2742 | } else { | |
2743 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2744 | zbookmark_phys_t zb; | |
2745 | ||
2746 | /* flag if L2ARC eligible, l2arc_noprefetch then decides */ | |
2747 | if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) | |
2748 | iter_aflags |= ARC_FLAG_L2CACHE; | |
2749 | ||
2750 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); | |
2751 | ||
2752 | SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, | |
2753 | dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); | |
2754 | ||
2755 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2756 | bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, | |
2757 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2758 | &iter_aflags, &zb); | |
2759 | } | |
2760 | ||
2761 | arc_buf_destroy(abuf, private); | |
2762 | } | |
2763 | ||
2764 | /* | |
2765 | * Issue prefetch reads for the given block on the given level. If the indirect | |
2766 | * blocks above that block are not in memory, we will read them in | |
2767 | * asynchronously. As a result, this call never blocks waiting for a read to | |
2768 | * complete. Note that the prefetch might fail if the dataset is encrypted and | |
2769 | * the encryption key is unmapped before the IO completes. | |
2770 | */ | |
2771 | void | |
2772 | dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, | |
2773 | arc_flags_t aflags) | |
2774 | { | |
2775 | blkptr_t bp; | |
2776 | int epbs, nlevels, curlevel; | |
2777 | uint64_t curblkid; | |
2778 | ||
2779 | ASSERT(blkid != DMU_BONUS_BLKID); | |
2780 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
2781 | ||
2782 | if (blkid > dn->dn_maxblkid) | |
2783 | return; | |
2784 | ||
2785 | if (dnode_block_freed(dn, blkid)) | |
2786 | return; | |
2787 | ||
2788 | /* | |
2789 | * This dnode hasn't been written to disk yet, so there's nothing to | |
2790 | * prefetch. | |
2791 | */ | |
2792 | nlevels = dn->dn_phys->dn_nlevels; | |
2793 | if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) | |
2794 | return; | |
2795 | ||
2796 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2797 | if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) | |
2798 | return; | |
2799 | ||
2800 | dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, | |
2801 | level, blkid); | |
2802 | if (db != NULL) { | |
2803 | mutex_exit(&db->db_mtx); | |
2804 | /* | |
2805 | * This dbuf already exists. It is either CACHED, or | |
2806 | * (we assume) about to be read or filled. | |
2807 | */ | |
2808 | return; | |
2809 | } | |
2810 | ||
2811 | /* | |
2812 | * Find the closest ancestor (indirect block) of the target block | |
2813 | * that is present in the cache. In this indirect block, we will | |
2814 | * find the bp that is at curlevel, curblkid. | |
2815 | */ | |
2816 | curlevel = level; | |
2817 | curblkid = blkid; | |
2818 | while (curlevel < nlevels - 1) { | |
2819 | int parent_level = curlevel + 1; | |
2820 | uint64_t parent_blkid = curblkid >> epbs; | |
2821 | dmu_buf_impl_t *db; | |
2822 | ||
2823 | if (dbuf_hold_impl(dn, parent_level, parent_blkid, | |
2824 | FALSE, TRUE, FTAG, &db) == 0) { | |
2825 | blkptr_t *bpp = db->db_buf->b_data; | |
2826 | bp = bpp[P2PHASE(curblkid, 1 << epbs)]; | |
2827 | dbuf_rele(db, FTAG); | |
2828 | break; | |
2829 | } | |
2830 | ||
2831 | curlevel = parent_level; | |
2832 | curblkid = parent_blkid; | |
2833 | } | |
2834 | ||
2835 | if (curlevel == nlevels - 1) { | |
2836 | /* No cached indirect blocks found. */ | |
2837 | ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); | |
2838 | bp = dn->dn_phys->dn_blkptr[curblkid]; | |
2839 | } | |
2840 | if (BP_IS_HOLE(&bp)) | |
2841 | return; | |
2842 | ||
2843 | ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); | |
2844 | ||
2845 | zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, | |
2846 | ZIO_FLAG_CANFAIL); | |
2847 | ||
2848 | dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); | |
2849 | dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; | |
2850 | SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2851 | dn->dn_object, level, blkid); | |
2852 | dpa->dpa_curlevel = curlevel; | |
2853 | dpa->dpa_prio = prio; | |
2854 | dpa->dpa_aflags = aflags; | |
2855 | dpa->dpa_spa = dn->dn_objset->os_spa; | |
2856 | dpa->dpa_dnode = dn; | |
2857 | dpa->dpa_epbs = epbs; | |
2858 | dpa->dpa_zio = pio; | |
2859 | ||
2860 | /* flag if L2ARC eligible, l2arc_noprefetch then decides */ | |
2861 | if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) | |
2862 | dpa->dpa_aflags |= ARC_FLAG_L2CACHE; | |
2863 | ||
2864 | /* | |
2865 | * If we have the indirect just above us, no need to do the asynchronous | |
2866 | * prefetch chain; we'll just run the last step ourselves. If we're at | |
2867 | * a higher level, though, we want to issue the prefetches for all the | |
2868 | * indirect blocks asynchronously, so we can go on with whatever we were | |
2869 | * doing. | |
2870 | */ | |
2871 | if (curlevel == level) { | |
2872 | ASSERT3U(curblkid, ==, blkid); | |
2873 | dbuf_issue_final_prefetch(dpa, &bp); | |
2874 | kmem_free(dpa, sizeof (*dpa)); | |
2875 | } else { | |
2876 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
2877 | zbookmark_phys_t zb; | |
2878 | ||
2879 | /* flag if L2ARC eligible, l2arc_noprefetch then decides */ | |
2880 | if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) | |
2881 | iter_aflags |= ARC_FLAG_L2CACHE; | |
2882 | ||
2883 | SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, | |
2884 | dn->dn_object, curlevel, curblkid); | |
2885 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
2886 | &bp, dbuf_prefetch_indirect_done, dpa, prio, | |
2887 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2888 | &iter_aflags, &zb); | |
2889 | } | |
2890 | /* | |
2891 | * We use pio here instead of dpa_zio since it's possible that | |
2892 | * dpa may have already been freed. | |
2893 | */ | |
2894 | zio_nowait(pio); | |
2895 | } | |
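
/*
 * Sketch of a typical dbuf_prefetch() call, as issued from a read-ahead
 * path such as dmu_zfetch(): the caller holds dn_struct_rwlock and fires
 * an asynchronous level-0 prefetch that it never waits on.  Hypothetical
 * helper for illustration; the flag choice mirrors the
 * predictive-prefetch callers.
 */
static void
example_prefetch(dnode_t *dn, uint64_t blkid)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
	    ARC_FLAG_PREDICTIVE_PREFETCH);
	rw_exit(&dn->dn_struct_rwlock);
}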
2896 | ||
2897 | #define DBUF_HOLD_IMPL_MAX_DEPTH 20 | |
2898 | ||
2899 | /* | |
2900 | * Helper function for __dbuf_hold_impl() to copy a buffer. Handles | |
2901 | * the case of encrypted, compressed and uncompressed buffers by | |
2902 | * allocating the new buffer, respectively, with arc_alloc_raw_buf(), | |
2903 | * arc_alloc_compressed_buf() or arc_alloc_buf(). | |
2904 | * | |
2905 | * NOTE: Declared noinline to avoid stack bloat in __dbuf_hold_impl(). | |
2906 | */ | |
2907 | noinline static void | |
2908 | dbuf_hold_copy(struct dbuf_hold_impl_data *dh) | |
2909 | { | |
2910 | dnode_t *dn = dh->dh_dn; | |
2911 | dmu_buf_impl_t *db = dh->dh_db; | |
2912 | dbuf_dirty_record_t *dr = dh->dh_dr; | |
2913 | arc_buf_t *data = dr->dt.dl.dr_data; | |
2914 | ||
2915 | enum zio_compress compress_type = arc_get_compression(data); | |
2916 | ||
2917 | if (arc_is_encrypted(data)) { | |
2918 | boolean_t byteorder; | |
2919 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
2920 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
2921 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
2922 | ||
2923 | arc_get_raw_params(data, &byteorder, salt, iv, mac); | |
2924 | dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, | |
2925 | dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, | |
2926 | dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), | |
2927 | compress_type)); | |
2928 | } else if (compress_type != ZIO_COMPRESS_OFF) { | |
2929 | dbuf_set_data(db, arc_alloc_compressed_buf( | |
2930 | dn->dn_objset->os_spa, db, arc_buf_size(data), | |
2931 | arc_buf_lsize(data), compress_type)); | |
2932 | } else { | |
2933 | dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, | |
2934 | DBUF_GET_BUFC_TYPE(db), db->db.db_size)); | |
2935 | } | |
2936 | ||
2937 | bcopy(data->b_data, db->db.db_data, arc_buf_size(data)); | |
2938 | } | |
2939 | ||
2940 | /* | |
2941 | * Returns with db_holds incremented, and db_mtx not held. | |
2942 | * Note: dn_struct_rwlock must be held. | |
2943 | */ | |
2944 | static int | |
2945 | __dbuf_hold_impl(struct dbuf_hold_impl_data *dh) | |
2946 | { | |
2947 | ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH); | |
2948 | dh->dh_parent = NULL; | |
2949 | ||
2950 | ASSERT(dh->dh_blkid != DMU_BONUS_BLKID); | |
2951 | ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock)); | |
2952 | ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level); | |
2953 | ||
2954 | *(dh->dh_dbp) = NULL; | |
2955 | ||
2956 | /* dbuf_find() returns with db_mtx held */ | |
2957 | dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object, | |
2958 | dh->dh_level, dh->dh_blkid); | |
2959 | ||
2960 | if (dh->dh_db == NULL) { | |
2961 | dh->dh_bp = NULL; | |
2962 | ||
2963 | if (dh->dh_fail_uncached) | |
2964 | return (SET_ERROR(ENOENT)); | |
2965 | ||
2966 | ASSERT3P(dh->dh_parent, ==, NULL); | |
2967 | dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
2968 | dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh); | |
2969 | if (dh->dh_fail_sparse) { | |
2970 | if (dh->dh_err == 0 && | |
2971 | dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) | |
2972 | dh->dh_err = SET_ERROR(ENOENT); | |
2973 | if (dh->dh_err) { | |
2974 | if (dh->dh_parent) | |
2975 | dbuf_rele(dh->dh_parent, NULL); | |
2976 | return (dh->dh_err); | |
2977 | } | |
2978 | } | |
2979 | if (dh->dh_err && dh->dh_err != ENOENT) | |
2980 | return (dh->dh_err); | |
2981 | dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid, | |
2982 | dh->dh_parent, dh->dh_bp); | |
2983 | } | |
2984 | ||
2985 | if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) { | |
2986 | mutex_exit(&dh->dh_db->db_mtx); | |
2987 | return (SET_ERROR(ENOENT)); | |
2988 | } | |
2989 | ||
2990 | if (dh->dh_db->db_buf != NULL) { | |
2991 | arc_buf_access(dh->dh_db->db_buf); | |
2992 | ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data); | |
2993 | } | |
2994 | ||
2995 | ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf)); | |
2996 | ||
2997 | /* | |
2998 | * If this buffer is currently syncing out, and we are | |
2999 | * still referencing it from db_data, we need to make a copy | |
3000 | * of it in case we decide we want to dirty it again in this txg. | |
3001 | */ | |
3002 | if (dh->dh_db->db_level == 0 && | |
3003 | dh->dh_db->db_blkid != DMU_BONUS_BLKID && | |
3004 | dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT && | |
3005 | dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) { | |
3006 | dh->dh_dr = dh->dh_db->db_data_pending; | |
3007 | if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) | |
3008 | dbuf_hold_copy(dh); | |
3009 | } | |
3010 | ||
3011 | if (multilist_link_active(&dh->dh_db->db_cache_link)) { | |
3012 | ASSERT(refcount_is_zero(&dh->dh_db->db_holds)); | |
3013 | multilist_remove(dbuf_cache, dh->dh_db); | |
3014 | (void) refcount_remove_many(&dbuf_cache_size, | |
3015 | dh->dh_db->db.db_size, dh->dh_db); | |
3016 | DBUF_STAT_BUMPDOWN(cache_levels[dh->dh_db->db_level]); | |
3017 | DBUF_STAT_BUMPDOWN(cache_count); | |
3018 | DBUF_STAT_DECR(cache_levels_bytes[dh->dh_db->db_level], | |
3019 | dh->dh_db->db.db_size); | |
3020 | } | |
3021 | (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag); | |
3022 | DBUF_VERIFY(dh->dh_db); | |
3023 | mutex_exit(&dh->dh_db->db_mtx); | |
3024 | ||
3025 | /* NOTE: we can't rele the parent until after we drop the db_mtx */ | |
3026 | if (dh->dh_parent) | |
3027 | dbuf_rele(dh->dh_parent, NULL); | |
3028 | ||
3029 | ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn); | |
3030 | ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid); | |
3031 | ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level); | |
3032 | *(dh->dh_dbp) = dh->dh_db; | |
3033 | ||
3034 | return (0); | |
3035 | } | |
3036 | ||
3037 | /* | |
3038 | * The following code preserves the recursive function dbuf_hold_impl() | |
3039 | * but moves the local variables AND function arguments to the heap to | |
3040 | * minimize the stack frame size. Enough space is initially allocated | |
3041 | * on the heap for 20 levels of recursion. | |
3042 | */ | |
3043 | int | |
3044 | dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, | |
3045 | boolean_t fail_sparse, boolean_t fail_uncached, | |
3046 | void *tag, dmu_buf_impl_t **dbp) | |
3047 | { | |
3048 | struct dbuf_hold_impl_data *dh; | |
3049 | int error; | |
3050 | ||
3051 | dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) * | |
3052 | DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP); | |
3053 | __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, | |
3054 | fail_uncached, tag, dbp, 0); | |
3055 | ||
3056 | error = __dbuf_hold_impl(dh); | |
3057 | ||
3058 | kmem_free(dh, sizeof (struct dbuf_hold_impl_data) * | |
3059 | DBUF_HOLD_IMPL_MAX_DEPTH); | |
3060 | ||
3061 | return (error); | |
3062 | } | |
3063 | ||
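| /* | |
| * Illustrative sketch, not part of the original source: a typical | |
| * caller pattern for dbuf_hold_impl(). FTAG and the error handling | |
| * shown are assumptions for illustration; dn_struct_rwlock must be | |
| * held across the call, and every successful hold must be balanced | |
| * by a dbuf_rele() with the same tag. | |
| * | |
| *	dmu_buf_impl_t *db; | |
| *	int err; | |
| * | |
| *	rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
| *	err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db); | |
| *	rw_exit(&dn->dn_struct_rwlock); | |
| *	if (err == 0) { | |
| *		(use the dbuf, e.g. via dbuf_read(), then release it) | |
| *		dbuf_rele(db, FTAG); | |
| *	} | |
| */ | |
||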
3064 | static void | |
3065 | __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh, | |
3066 | dnode_t *dn, uint8_t level, uint64_t blkid, | |
3067 | boolean_t fail_sparse, boolean_t fail_uncached, | |
3068 | void *tag, dmu_buf_impl_t **dbp, int depth) | |
3069 | { | |
3070 | dh->dh_dn = dn; | |
3071 | dh->dh_level = level; | |
3072 | dh->dh_blkid = blkid; | |
3073 | ||
3074 | dh->dh_fail_sparse = fail_sparse; | |
3075 | dh->dh_fail_uncached = fail_uncached; | |
3076 | ||
3077 | dh->dh_tag = tag; | |
3078 | dh->dh_dbp = dbp; | |
3079 | ||
3080 | dh->dh_db = NULL; | |
3081 | dh->dh_parent = NULL; | |
3082 | dh->dh_bp = NULL; | |
3083 | dh->dh_err = 0; | |
3084 | dh->dh_dr = NULL; | |
3085 | ||
3086 | dh->dh_depth = depth; | |
3087 | } | |
3088 | ||
3089 | dmu_buf_impl_t * | |
3090 | dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) | |
3091 | { | |
3092 | return (dbuf_hold_level(dn, 0, blkid, tag)); | |
3093 | } | |
3094 | ||
3095 | dmu_buf_impl_t * | |
3096 | dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) | |
3097 | { | |
3098 | dmu_buf_impl_t *db; | |
3099 | int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); | |
3100 | return (err ? NULL : db); | |
3101 | } | |
3102 | ||
3103 | void | |
3104 | dbuf_create_bonus(dnode_t *dn) | |
3105 | { | |
3106 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
3107 | ||
3108 | ASSERT(dn->dn_bonus == NULL); | |
3109 | dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); | |
3110 | } | |
3111 | ||
3112 | int | |
3113 | dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) | |
3114 | { | |
3115 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3116 | dnode_t *dn; | |
3117 | ||
3118 | if (db->db_blkid != DMU_SPILL_BLKID) | |
3119 | return (SET_ERROR(ENOTSUP)); | |
3120 | if (blksz == 0) | |
3121 | blksz = SPA_MINBLOCKSIZE; | |
3122 | ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); | |
3123 | blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); | |
3124 | ||
3125 | DB_DNODE_ENTER(db); | |
3126 | dn = DB_DNODE(db); | |
3127 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
3128 | dbuf_new_size(db, blksz, tx); | |
3129 | rw_exit(&dn->dn_struct_rwlock); | |
3130 | DB_DNODE_EXIT(db); | |
3131 | ||
3132 | return (0); | |
3133 | } | |
3134 | ||
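| /* | |
| * Worked example (editorial, derived from dbuf_spill_set_blksz() | |
| * above): with SPA_MINBLOCKSIZE of 512, a requested blksz of 1000 is | |
| * rounded up by P2ROUNDUP() to 1024, and a blksz of 0 is replaced by | |
| * 512 before the spill block is resized. | |
| */ | |
||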
3135 | void | |
3136 | dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) | |
3137 | { | |
3138 | dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); | |
3139 | } | |
3140 | ||
3141 | #pragma weak dmu_buf_add_ref = dbuf_add_ref | |
3142 | void | |
3143 | dbuf_add_ref(dmu_buf_impl_t *db, void *tag) | |
3144 | { | |
3145 | int64_t holds = refcount_add(&db->db_holds, tag); | |
3146 | VERIFY3S(holds, >, 1); | |
3147 | } | |
3148 | ||
3149 | #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref | |
3150 | boolean_t | |
3151 | dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, | |
3152 | void *tag) | |
3153 | { | |
3154 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3155 | dmu_buf_impl_t *found_db; | |
3156 | boolean_t result = B_FALSE; | |
3157 | ||
3158 | if (blkid == DMU_BONUS_BLKID) | |
3159 | found_db = dbuf_find_bonus(os, obj); | |
3160 | else | |
3161 | found_db = dbuf_find(os, obj, 0, blkid); | |
3162 | ||
3163 | if (found_db != NULL) { | |
3164 | if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { | |
3165 | (void) refcount_add(&db->db_holds, tag); | |
3166 | result = B_TRUE; | |
3167 | } | |
3168 | mutex_exit(&found_db->db_mtx); | |
3169 | } | |
3170 | return (result); | |
3171 | } | |
3172 | ||
3173 | /* | |
3174 | * If you call dbuf_rele() you had better not be referencing the dnode handle | |
3175 | * unless you have some other direct or indirect hold on the dnode. (An indirect | |
3176 | * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) | |
3177 | * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the | |
3178 | * dnode's parent dbuf evicting its dnode handles. | |
3179 | */ | |
3180 | void | |
3181 | dbuf_rele(dmu_buf_impl_t *db, void *tag) | |
3182 | { | |
3183 | mutex_enter(&db->db_mtx); | |
3184 | dbuf_rele_and_unlock(db, tag); | |
3185 | } | |
3186 | ||
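| /* | |
| * Illustrative sketch, not from the original source, of the hazard | |
| * described above. If the released hold was the only thing keeping | |
| * the dnode's handle alive, the handle must not be used afterwards: | |
| * | |
| *	DB_DNODE_ENTER(db); | |
| *	dn = DB_DNODE(db); | |
| *	DB_DNODE_EXIT(db); | |
| *	dbuf_rele(db, FTAG); | |
| *	(dn may now point at an evicted dnode handle) | |
| * | |
| * Callers that still need the dnode should take an explicit hold | |
| * with dnode_hold(), or hold another of the dnode's dbufs, first. | |
| */ | |
||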
3187 | void | |
3188 | dmu_buf_rele(dmu_buf_t *db, void *tag) | |
3189 | { | |
3190 | dbuf_rele((dmu_buf_impl_t *)db, tag); | |
3191 | } | |
3192 | ||
3193 | /* | |
3194 | * dbuf_rele() for an already-locked dbuf. This is necessary to allow | |
3195 | * db_dirtycnt and db_holds to be updated atomically. | |
3196 | */ | |
3197 | void | |
3198 | dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) | |
3199 | { | |
3200 | int64_t holds; | |
3201 | ||
3202 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
3203 | DBUF_VERIFY(db); | |
3204 | ||
3205 | /* | |
3206 | * Remove the reference to the dbuf before removing its hold on the | |
3207 | * dnode so we can guarantee in dnode_move() that a referenced bonus | |
3208 | * buffer has a corresponding dnode hold. | |
3209 | */ | |
3210 | holds = refcount_remove(&db->db_holds, tag); | |
3211 | ASSERT(holds >= 0); | |
3212 | ||
3213 | /* | |
3214 | * We can't freeze indirects if there is a possibility that they | |
3215 | * may be modified in the current syncing context. | |
3216 | */ | |
3217 | if (db->db_buf != NULL && | |
3218 | holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { | |
3219 | arc_buf_freeze(db->db_buf); | |
3220 | } | |
3221 | ||
3222 | if (holds == db->db_dirtycnt && | |
3223 | db->db_level == 0 && db->db_user_immediate_evict) | |
3224 | dbuf_evict_user(db); | |
3225 | ||
3226 | if (holds == 0) { | |
3227 | if (db->db_blkid == DMU_BONUS_BLKID) { | |
3228 | dnode_t *dn; | |
3229 | boolean_t evict_dbuf = db->db_pending_evict; | |
3230 | ||
3231 | /* | |
3232 | * If the dnode moves here, we cannot cross this | |
3233 | * barrier until the move completes. | |
3234 | */ | |
3235 | DB_DNODE_ENTER(db); | |
3236 | ||
3237 | dn = DB_DNODE(db); | |
3238 | atomic_dec_32(&dn->dn_dbufs_count); | |
3239 | ||
3240 | /* | |
3241 | * Decrementing the dbuf count means that the bonus | |
3242 | * buffer's dnode hold is no longer discounted in | |
3243 | * dnode_move(). The dnode cannot move until after | |
3244 | * the dnode_rele() below. | |
3245 | */ | |
3246 | DB_DNODE_EXIT(db); | |
3247 | ||
3248 | /* | |
3249 | * Do not reference db after its lock is dropped. | |
3250 | * Another thread may evict it. | |
3251 | */ | |
3252 | mutex_exit(&db->db_mtx); | |
3253 | ||
3254 | if (evict_dbuf) | |
3255 | dnode_evict_bonus(dn); | |
3256 | ||
3257 | dnode_rele(dn, db); | |
3258 | } else if (db->db_buf == NULL) { | |
3259 | /* | |
3260 | * This is a special case: we never associated this | |
3261 | * dbuf with any data allocated from the ARC. | |
3262 | */ | |
3263 | ASSERT(db->db_state == DB_UNCACHED || | |
3264 | db->db_state == DB_NOFILL); | |
3265 | dbuf_destroy(db); | |
3266 | } else if (arc_released(db->db_buf)) { | |
3267 | /* | |
3268 | * This dbuf has anonymous data associated with it. | |
3269 | */ | |
3270 | dbuf_destroy(db); | |
3271 | } else { | |
3272 | boolean_t do_arc_evict = B_FALSE; | |
3273 | blkptr_t bp; | |
3274 | spa_t *spa = dmu_objset_spa(db->db_objset); | |
3275 | ||
3276 | if (!DBUF_IS_CACHEABLE(db) && | |
3277 | db->db_blkptr != NULL && | |
3278 | !BP_IS_HOLE(db->db_blkptr) && | |
3279 | !BP_IS_EMBEDDED(db->db_blkptr)) { | |
3280 | do_arc_evict = B_TRUE; | |
3281 | bp = *db->db_blkptr; | |
3282 | } | |
3283 | ||
3284 | if (!DBUF_IS_CACHEABLE(db) || | |
3285 | db->db_pending_evict) { | |
3286 | dbuf_destroy(db); | |
3287 | } else if (!multilist_link_active(&db->db_cache_link)) { | |
3288 | multilist_insert(dbuf_cache, db); | |
3289 | (void) refcount_add_many(&dbuf_cache_size, | |
3290 | db->db.db_size, db); | |
3291 | DBUF_STAT_BUMP(cache_levels[db->db_level]); | |
3292 | DBUF_STAT_BUMP(cache_count); | |
3293 | DBUF_STAT_INCR(cache_levels_bytes[db->db_level], | |
3294 | db->db.db_size); | |
3295 | DBUF_STAT_MAX(cache_size_bytes_max, | |
3296 | refcount_count(&dbuf_cache_size)); | |
3297 | mutex_exit(&db->db_mtx); | |
3298 | ||
3299 | dbuf_evict_notify(); | |
3300 | } | |
3301 | ||
3302 | if (do_arc_evict) | |
3303 | arc_freed(spa, &bp); | |
3304 | } | |
3305 | } else { | |
3306 | mutex_exit(&db->db_mtx); | |
3307 | } | |
3308 | ||
3309 | } | |
3310 | ||
3311 | #pragma weak dmu_buf_refcount = dbuf_refcount | |
3312 | uint64_t | |
3313 | dbuf_refcount(dmu_buf_impl_t *db) | |
3314 | { | |
3315 | return (refcount_count(&db->db_holds)); | |
3316 | } | |
3317 | ||
3318 | void * | |
3319 | dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, | |
3320 | dmu_buf_user_t *new_user) | |
3321 | { | |
3322 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3323 | ||
3324 | mutex_enter(&db->db_mtx); | |
3325 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3326 | if (db->db_user == old_user) | |
3327 | db->db_user = new_user; | |
3328 | else | |
3329 | old_user = db->db_user; | |
3330 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3331 | mutex_exit(&db->db_mtx); | |
3332 | ||
3333 | return (old_user); | |
3334 | } | |
3335 | ||
3336 | void * | |
3337 | dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
3338 | { | |
3339 | return (dmu_buf_replace_user(db_fake, NULL, user)); | |
3340 | } | |
3341 | ||
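| /* | |
| * Illustrative sketch (an assumption, not part of the original | |
| * source): a consumer typically embeds a dmu_buf_user_t in its own | |
| * state, initializes it with dmu_buf_init_user(), and attaches it | |
| * here. A NULL return means the attach succeeded; a non-NULL return | |
| * is the user that won an earlier race and should be used instead: | |
| * | |
| *	typedef struct my_state { | |
| *		dmu_buf_user_t ms_dbu; | |
| *		dmu_buf_t *ms_db; | |
| *	} my_state_t; | |
| * | |
| *	dmu_buf_init_user(&ms->ms_dbu, my_evict_sync, my_evict_async, | |
| *	    &ms->ms_db); | |
| *	winner = dmu_buf_set_user(db, &ms->ms_dbu); | |
| *	if (winner != NULL) { | |
| *		(lost the race: free ms and use winner's state) | |
| *	} | |
| */ | |
||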
3342 | void * | |
3343 | dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
3344 | { | |
3345 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3346 | ||
3347 | db->db_user_immediate_evict = TRUE; | |
3348 | return (dmu_buf_set_user(db_fake, user)); | |
3349 | } | |
3350 | ||
3351 | void * | |
3352 | dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
3353 | { | |
3354 | return (dmu_buf_replace_user(db_fake, user, NULL)); | |
3355 | } | |
3356 | ||
3357 | void * | |
3358 | dmu_buf_get_user(dmu_buf_t *db_fake) | |
3359 | { | |
3360 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3361 | ||
3362 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
3363 | return (db->db_user); | |
3364 | } | |
3365 | ||
3366 | void | |
3367 | dmu_buf_user_evict_wait() | |
3368 | { | |
3369 | taskq_wait(dbu_evict_taskq); | |
3370 | } | |
3371 | ||
3372 | blkptr_t * | |
3373 | dmu_buf_get_blkptr(dmu_buf_t *db) | |
3374 | { | |
3375 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3376 | return (dbi->db_blkptr); | |
3377 | } | |
3378 | ||
3379 | objset_t * | |
3380 | dmu_buf_get_objset(dmu_buf_t *db) | |
3381 | { | |
3382 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3383 | return (dbi->db_objset); | |
3384 | } | |
3385 | ||
3386 | dnode_t * | |
3387 | dmu_buf_dnode_enter(dmu_buf_t *db) | |
3388 | { | |
3389 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3390 | DB_DNODE_ENTER(dbi); | |
3391 | return (DB_DNODE(dbi)); | |
3392 | } | |
3393 | ||
3394 | void | |
3395 | dmu_buf_dnode_exit(dmu_buf_t *db) | |
3396 | { | |
3397 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
3398 | DB_DNODE_EXIT(dbi); | |
3399 | } | |
3400 | ||
3401 | static void | |
3402 | dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) | |
3403 | { | |
3404 | /* ASSERT(dmu_tx_is_syncing(tx)) */ | |
3405 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
3406 | ||
3407 | if (db->db_blkptr != NULL) | |
3408 | return; | |
3409 | ||
3410 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
3411 | db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); | |
3412 | BP_ZERO(db->db_blkptr); | |
3413 | return; | |
3414 | } | |
3415 | if (db->db_level == dn->dn_phys->dn_nlevels-1) { | |
3416 | /* | |
3417 | * This buffer was allocated at a time when there were | |
3418 | * no blkptrs available from the dnode, or it was | |
3419 | * inappropriate to hook it in (i.e., nlevels mismatch). | |
3420 | */ | |
3421 | ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); | |
3422 | ASSERT(db->db_parent == NULL); | |
3423 | db->db_parent = dn->dn_dbuf; | |
3424 | db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; | |
3425 | DBUF_VERIFY(db); | |
3426 | } else { | |
3427 | dmu_buf_impl_t *parent = db->db_parent; | |
3428 | int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3429 | ||
3430 | ASSERT(dn->dn_phys->dn_nlevels > 1); | |
3431 | if (parent == NULL) { | |
3432 | mutex_exit(&db->db_mtx); | |
3433 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
3434 | parent = dbuf_hold_level(dn, db->db_level + 1, | |
3435 | db->db_blkid >> epbs, db); | |
3436 | rw_exit(&dn->dn_struct_rwlock); | |
3437 | mutex_enter(&db->db_mtx); | |
3438 | db->db_parent = parent; | |
3439 | } | |
3440 | db->db_blkptr = (blkptr_t *)parent->db.db_data + | |
3441 | (db->db_blkid & ((1ULL << epbs) - 1)); | |
3442 | DBUF_VERIFY(db); | |
3443 | } | |
3444 | } | |
3445 | ||
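| /* | |
| * Worked example (editorial, derived from dbuf_check_blkptr() above): | |
| * with the common dn_indblkshift of 17 (128K indirect blocks) and | |
| * SPA_BLKPTRSHIFT of 7 (128-byte block pointers), epbs is 10 and each | |
| * indirect block holds 1024 blkptrs. A level-0 dbuf with db_blkid | |
| * 5000 therefore maps to parent blkid 5000 >> 10 == 4 and to blkptr | |
| * slot 5000 & 1023 == 904 within that parent. | |
| */ | |
||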
3446 | /* | |
3447 | * Ensure the dbuf's data is untransformed if the associated dirty | |
3448 | * record requires it. This is used by dbuf_sync_leaf() to ensure | |
3449 | * that a dnode block is decrypted before we write new data to it. | |
3450 | * For raw writes we assert that the buffer is already encrypted. | |
3451 | */ | |
3452 | static void | |
3453 | dbuf_check_crypt(dbuf_dirty_record_t *dr) | |
3454 | { | |
3455 | int err; | |
3456 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3457 | ||
3458 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
3459 | ||
3460 | if (!dr->dt.dl.dr_raw && arc_is_encrypted(db->db_buf)) { | |
3461 | /* | |
3462 | * Unfortunately, there is currently no mechanism for | |
3463 | * syncing context to handle decryption errors. An error | |
3464 | * here is only possible if an attacker maliciously | |
3465 | * changed a dnode block and updated the associated | |
3466 | * checksums going up the block tree. | |
3467 | */ | |
3468 | err = arc_untransform(db->db_buf, db->db_objset->os_spa, | |
3469 | dmu_objset_id(db->db_objset), B_TRUE); | |
3470 | if (err) | |
3471 | panic("Invalid dnode block MAC"); | |
3472 | } else if (dr->dt.dl.dr_raw) { | |
3473 | /* | |
3474 | * Writing raw encrypted data requires the db's arc buffer | |
3475 | * to be converted to raw by the caller. | |
3476 | */ | |
3477 | ASSERT(arc_is_encrypted(db->db_buf)); | |
3478 | } | |
3479 | } | |
3480 | ||
3481 | /* | |
3482 | * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it | |
3483 | * is critical that we not allow the compiler to inline this function into | |
3484 | * dbuf_sync_list(), thereby drastically bloating the stack usage. | |
3485 | */ | |
3486 | noinline static void | |
3487 | dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) | |
3488 | { | |
3489 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3490 | dnode_t *dn; | |
3491 | zio_t *zio; | |
3492 | ||
3493 | ASSERT(dmu_tx_is_syncing(tx)); | |
3494 | ||
3495 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3496 | ||
3497 | mutex_enter(&db->db_mtx); | |
3498 | ||
3499 | ASSERT(db->db_level > 0); | |
3500 | DBUF_VERIFY(db); | |
3501 | ||
3502 | /* Read the block if it hasn't been read yet. */ | |
3503 | if (db->db_buf == NULL) { | |
3504 | mutex_exit(&db->db_mtx); | |
3505 | (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); | |
3506 | mutex_enter(&db->db_mtx); | |
3507 | } | |
3508 | ASSERT3U(db->db_state, ==, DB_CACHED); | |
3509 | ASSERT(db->db_buf != NULL); | |
3510 | ||
3511 | DB_DNODE_ENTER(db); | |
3512 | dn = DB_DNODE(db); | |
3513 | /* Indirect block size must match what the dnode thinks it is. */ | |
3514 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); | |
3515 | dbuf_check_blkptr(dn, db); | |
3516 | DB_DNODE_EXIT(db); | |
3517 | ||
3518 | /* Provide the pending dirty record to child dbufs */ | |
3519 | db->db_data_pending = dr; | |
3520 | ||
3521 | mutex_exit(&db->db_mtx); | |
3522 | dbuf_write(dr, db->db_buf, tx); | |
3523 | ||
3524 | zio = dr->dr_zio; | |
3525 | mutex_enter(&dr->dt.di.dr_mtx); | |
3526 | dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); | |
3527 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); | |
3528 | mutex_exit(&dr->dt.di.dr_mtx); | |
3529 | zio_nowait(zio); | |
3530 | } | |
3531 | ||
3532 | /* | |
3533 | * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is | |
3534 | * critical that we not allow the compiler to inline this function into | |
3535 | * dbuf_sync_list(), thereby drastically bloating the stack usage. | |
3536 | */ | |
3537 | noinline static void | |
3538 | dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) | |
3539 | { | |
3540 | arc_buf_t **datap = &dr->dt.dl.dr_data; | |
3541 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
3542 | dnode_t *dn; | |
3543 | objset_t *os; | |
3544 | uint64_t txg = tx->tx_txg; | |
3545 | ||
3546 | ASSERT(dmu_tx_is_syncing(tx)); | |
3547 | ||
3548 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
3549 | ||
3550 | mutex_enter(&db->db_mtx); | |
3551 | /* | |
3552 | * To be synced, we must be dirtied. But we | |
3553 | * might have been freed after the dirty. | |
3554 | */ | |
3555 | if (db->db_state == DB_UNCACHED) { | |
3556 | /* This buffer has been freed since it was dirtied */ | |
3557 | ASSERT(db->db.db_data == NULL); | |
3558 | } else if (db->db_state == DB_FILL) { | |
3559 | /* This buffer was freed and is now being re-filled */ | |
3560 | ASSERT(db->db.db_data != dr->dt.dl.dr_data); | |
3561 | } else { | |
3562 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); | |
3563 | } | |
3564 | DBUF_VERIFY(db); | |
3565 | ||
3566 | DB_DNODE_ENTER(db); | |
3567 | dn = DB_DNODE(db); | |
3568 | ||
3569 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
3570 | mutex_enter(&dn->dn_mtx); | |
3571 | if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { | |
3572 | /* | |
3573 | * In the previous transaction group, the bonus buffer | |
3574 | * was entirely used to store the attributes for the | |
3575 | * dnode which overrode the dn_spill field. However, | |
3576 | * when adding more attributes to the file a spill | |
3577 | * block was required to hold the extra attributes. | |
3578 | * | |
3579 | * Make sure to clear the garbage left in the dn_spill | |
3580 | * field from the previous attributes in the bonus | |
3581 | * buffer. Otherwise, after writing out the spill | |
3582 | * block to the newly allocated dva, it will free | |
3583 | * the old block pointed to by the invalid dn_spill. | |
3584 | */ | |
3585 | db->db_blkptr = NULL; | |
3586 | } | |
3587 | dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; | |
3588 | mutex_exit(&dn->dn_mtx); | |
3589 | } | |
3590 | ||
3591 | /* | |
3592 | * If this is a bonus buffer, simply copy the bonus data into the | |
3593 | * dnode. It will be written out when the dnode is synced (and it | |
3594 | * will be synced, since it must have been dirty for dbuf_sync to | |
3595 | * be called). | |
3596 | */ | |
3597 | if (db->db_blkid == DMU_BONUS_BLKID) { | |
3598 | dbuf_dirty_record_t **drp; | |
3599 | ||
3600 | ASSERT(*datap != NULL); | |
3601 | ASSERT0(db->db_level); | |
3602 | ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, | |
3603 | DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); | |
3604 | bcopy(*datap, DN_BONUS(dn->dn_phys), | |
3605 | DN_MAX_BONUS_LEN(dn->dn_phys)); | |
3606 | DB_DNODE_EXIT(db); | |
3607 | ||
3608 | if (*datap != db->db.db_data) { | |
3609 | int slots = DB_DNODE(db)->dn_num_slots; | |
3610 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
3611 | kmem_free(*datap, bonuslen); | |
3612 | arc_space_return(bonuslen, ARC_SPACE_BONUS); | |
3613 | } | |
3614 | db->db_data_pending = NULL; | |
3615 | drp = &db->db_last_dirty; | |
3616 | while (*drp != dr) | |
3617 | drp = &(*drp)->dr_next; | |
3618 | ASSERT(dr->dr_next == NULL); | |
3619 | ASSERT(dr->dr_dbuf == db); | |
3620 | *drp = dr->dr_next; | |
3621 | if (dr->dr_dbuf->db_level != 0) { | |
3622 | mutex_destroy(&dr->dt.di.dr_mtx); | |
3623 | list_destroy(&dr->dt.di.dr_children); | |
3624 | } | |
3625 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
3626 | ASSERT(db->db_dirtycnt > 0); | |
3627 | db->db_dirtycnt -= 1; | |
3628 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); | |
3629 | return; | |
3630 | } | |
3631 | ||
3632 | os = dn->dn_objset; | |
3633 | ||
3634 | /* | |
3635 | * dbuf_check_blkptr() may drop the db_mtx lock, allowing a dmu_sync | |
3636 | * operation to sneak in. As a result, we need to ensure that we | |
3637 | * don't check the dr_override_state until we have returned from | |
3638 | * dbuf_check_blkptr. | |
3639 | */ | |
3640 | dbuf_check_blkptr(dn, db); | |
3641 | ||
3642 | /* | |
3643 | * If this buffer is in the middle of an immediate write, | |
3644 | * wait for the synchronous IO to complete. | |
3645 | */ | |
3646 | while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { | |
3647 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); | |
3648 | cv_wait(&db->db_changed, &db->db_mtx); | |
3649 | ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); | |
3650 | } | |
3651 | ||
3652 | /* | |
3653 | * If this is a dnode block, ensure it is appropriately encrypted | |
3654 | * or decrypted, depending on what we are writing to it this txg. | |
3655 | */ | |
3656 | if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) | |
3657 | dbuf_check_crypt(dr); | |
3658 | ||
3659 | if (db->db_state != DB_NOFILL && | |
3660 | dn->dn_object != DMU_META_DNODE_OBJECT && | |
3661 | refcount_count(&db->db_holds) > 1 && | |
3662 | dr->dt.dl.dr_override_state != DR_OVERRIDDEN && | |
3663 | *datap == db->db_buf) { | |
3664 | /* | |
3665 | * If this buffer is currently "in use" (i.e., there | |
3666 | * are active holds and db_data still references it), | |
3667 | * then make a copy before we start the write so that | |
3668 | * any modifications from the open txg will not leak | |
3669 | * into this write. | |
3670 | * | |
3671 | * NOTE: this copy does not need to be made for | |
3672 | * objects only modified in the syncing context (e.g. | |
3673 | * DMU_META_DNODE_OBJECT blocks). | |
3674 | */ | |
3675 | int psize = arc_buf_size(*datap); | |
3676 | int lsize = arc_buf_lsize(*datap); | |
3677 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
3678 | enum zio_compress compress_type = arc_get_compression(*datap); | |
3679 | ||
3680 | if (arc_is_encrypted(*datap)) { | |
3681 | boolean_t byteorder; | |
3682 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
3683 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
3684 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
3685 | ||
3686 | arc_get_raw_params(*datap, &byteorder, salt, iv, mac); | |
3687 | *datap = arc_alloc_raw_buf(os->os_spa, db, | |
3688 | dmu_objset_id(os), byteorder, salt, iv, mac, | |
3689 | dn->dn_type, psize, lsize, compress_type); | |
3690 | } else if (compress_type != ZIO_COMPRESS_OFF) { | |
3691 | ASSERT3U(type, ==, ARC_BUFC_DATA); | |
3692 | *datap = arc_alloc_compressed_buf(os->os_spa, db, | |
3693 | psize, lsize, compress_type); | |
3694 | } else { | |
3695 | *datap = arc_alloc_buf(os->os_spa, db, type, psize); | |
3696 | } | |
3697 | bcopy(db->db.db_data, (*datap)->b_data, psize); | |
3698 | } | |
3699 | db->db_data_pending = dr; | |
3700 | ||
3701 | mutex_exit(&db->db_mtx); | |
3702 | ||
3703 | dbuf_write(dr, *datap, tx); | |
3704 | ||
3705 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
3706 | if (dn->dn_object == DMU_META_DNODE_OBJECT) { | |
3707 | list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); | |
3708 | DB_DNODE_EXIT(db); | |
3709 | } else { | |
3710 | /* | |
3711 | * Although zio_nowait() does not "wait for an IO", it does | |
3712 | * initiate the IO. If this is an empty write it seems plausible | |
3713 | * that the IO could actually be completed before the nowait | |
3714 | * returns. We need to DB_DNODE_EXIT() first in case | |
3715 | * zio_nowait() invalidates the dbuf. | |
3716 | */ | |
3717 | DB_DNODE_EXIT(db); | |
3718 | zio_nowait(dr->dr_zio); | |
3719 | } | |
3720 | } | |
3721 | ||
3722 | void | |
3723 | dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) | |
3724 | { | |
3725 | dbuf_dirty_record_t *dr; | |
3726 | ||
3727 | while ((dr = list_head(list))) { | |
3728 | if (dr->dr_zio != NULL) { | |
3729 | /* | |
3730 | * If we find an already initialized zio then we | |
3731 | * are processing the meta-dnode, and we have finished. | |
3732 | * The dbufs for all dnodes are put back on the list | |
3733 | * during processing, so that we can zio_wait() | |
3734 | * these IOs after initiating all child IOs. | |
3735 | */ | |
3736 | ASSERT3U(dr->dr_dbuf->db.db_object, ==, | |
3737 | DMU_META_DNODE_OBJECT); | |
3738 | break; | |
3739 | } | |
3740 | if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && | |
3741 | dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { | |
3742 | VERIFY3U(dr->dr_dbuf->db_level, ==, level); | |
3743 | } | |
3744 | list_remove(list, dr); | |
3745 | if (dr->dr_dbuf->db_level > 0) | |
3746 | dbuf_sync_indirect(dr, tx); | |
3747 | else | |
3748 | dbuf_sync_leaf(dr, tx); | |
3749 | } | |
3750 | } | |
3751 | ||
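| /* | |
| * Editorial sketch of the recursion above (the dnode_sync() caller is | |
| * an assumption from context): dnode_sync() seeds the list with the | |
| * dnode's dirty records, dbuf_sync_indirect() issues the parent write | |
| * and re-enters dbuf_sync_list() on its dr_children at level - 1, and | |
| * the walk bottoms out in dbuf_sync_leaf(). | |
| */ | |
||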
3752 | /* ARGSUSED */ | |
3753 | static void | |
3754 | dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3755 | { | |
3756 | dmu_buf_impl_t *db = vdb; | |
3757 | dnode_t *dn; | |
3758 | blkptr_t *bp = zio->io_bp; | |
3759 | blkptr_t *bp_orig = &zio->io_bp_orig; | |
3760 | spa_t *spa = zio->io_spa; | |
3761 | int64_t delta; | |
3762 | uint64_t fill = 0; | |
3763 | int i; | |
3764 | ||
3765 | ASSERT3P(db->db_blkptr, !=, NULL); | |
3766 | ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); | |
3767 | ||
3768 | DB_DNODE_ENTER(db); | |
3769 | dn = DB_DNODE(db); | |
3770 | delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); | |
3771 | dnode_diduse_space(dn, delta - zio->io_prev_space_delta); | |
3772 | zio->io_prev_space_delta = delta; | |
3773 | ||
3774 | if (bp->blk_birth != 0) { | |
3775 | ASSERT((db->db_blkid != DMU_SPILL_BLKID && | |
3776 | BP_GET_TYPE(bp) == dn->dn_type) || | |
3777 | (db->db_blkid == DMU_SPILL_BLKID && | |
3778 | BP_GET_TYPE(bp) == dn->dn_bonustype) || | |
3779 | BP_IS_EMBEDDED(bp)); | |
3780 | ASSERT(BP_GET_LEVEL(bp) == db->db_level); | |
3781 | } | |
3782 | ||
3783 | mutex_enter(&db->db_mtx); | |
3784 | ||
3785 | #ifdef ZFS_DEBUG | |
3786 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
3787 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); | |
3788 | ASSERT(!(BP_IS_HOLE(bp)) && | |
3789 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); | |
3790 | } | |
3791 | #endif | |
3792 | ||
3793 | if (db->db_level == 0) { | |
3794 | mutex_enter(&dn->dn_mtx); | |
3795 | if (db->db_blkid > dn->dn_phys->dn_maxblkid && | |
3796 | db->db_blkid != DMU_SPILL_BLKID) | |
3797 | dn->dn_phys->dn_maxblkid = db->db_blkid; | |
3798 | mutex_exit(&dn->dn_mtx); | |
3799 | ||
3800 | if (dn->dn_type == DMU_OT_DNODE) { | |
3801 | i = 0; | |
3802 | while (i < db->db.db_size) { | |
3803 | dnode_phys_t *dnp = | |
3804 | (void *)(((char *)db->db.db_data) + i); | |
3805 | ||
3806 | i += DNODE_MIN_SIZE; | |
3807 | if (dnp->dn_type != DMU_OT_NONE) { | |
3808 | fill++; | |
3809 | i += dnp->dn_extra_slots * | |
3810 | DNODE_MIN_SIZE; | |
3811 | } | |
3812 | } | |
3813 | } else { | |
3814 | if (BP_IS_HOLE(bp)) { | |
3815 | fill = 0; | |
3816 | } else { | |
3817 | fill = 1; | |
3818 | } | |
3819 | } | |
3820 | } else { | |
3821 | blkptr_t *ibp = db->db.db_data; | |
3822 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); | |
3823 | for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { | |
3824 | if (BP_IS_HOLE(ibp)) | |
3825 | continue; | |
3826 | fill += BP_GET_FILL(ibp); | |
3827 | } | |
3828 | } | |
3829 | DB_DNODE_EXIT(db); | |
3830 | ||
3831 | if (!BP_IS_EMBEDDED(bp)) | |
3832 | BP_SET_FILL(bp, fill); | |
3833 | ||
3834 | mutex_exit(&db->db_mtx); | |
3835 | ||
3836 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
3837 | *db->db_blkptr = *bp; | |
3838 | rw_exit(&dn->dn_struct_rwlock); | |
3839 | } | |
3840 | ||
3841 | /* ARGSUSED */ | |
3842 | /* | |
3843 | * This function gets called just prior to running through the compression | |
3844 | * stage of the zio pipeline. If we're an indirect block composed only of | |
3845 | * holes, then we want this indirect to be compressed away to a hole. In | |
3846 | * order to do that we must zero out any information about the holes that | |
3847 | * this indirect points to before we try to compress it. | |
3848 | */ | |
3849 | static void | |
3850 | dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3851 | { | |
3852 | dmu_buf_impl_t *db = vdb; | |
3853 | dnode_t *dn; | |
3854 | blkptr_t *bp; | |
3855 | unsigned int epbs, i; | |
3856 | ||
3857 | ASSERT3U(db->db_level, >, 0); | |
3858 | DB_DNODE_ENTER(db); | |
3859 | dn = DB_DNODE(db); | |
3860 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3861 | ASSERT3U(epbs, <, 31); | |
3862 | ||
3863 | /* Determine if all our children are holes */ | |
3864 | for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { | |
3865 | if (!BP_IS_HOLE(bp)) | |
3866 | break; | |
3867 | } | |
3868 | ||
3869 | /* | |
3870 | * If all the children are holes, then zero them all out so that | |
3871 | * we may get compressed away. | |
3872 | */ | |
3873 | if (i == 1ULL << epbs) { | |
3874 | /* | |
3875 | * We only found holes. Grab the rwlock to prevent | |
3876 | * anybody from reading the blocks we're about to | |
3877 | * zero out. | |
3878 | */ | |
3879 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
3880 | bzero(db->db.db_data, db->db.db_size); | |
3881 | rw_exit(&dn->dn_struct_rwlock); | |
3882 | } | |
3883 | DB_DNODE_EXIT(db); | |
3884 | } | |
3885 | ||
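| /* | |
| * Worked example (editorial, derived from the code above): for a 128K | |
| * indirect block (epbs == 10) the loop scans all 1024 child blkptrs. | |
| * Only if every one is a hole is the block bzero()ed, which lets the | |
| * compression stage collapse the indirect itself into a hole. | |
| */ | |
||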
3886 | /* | |
3887 | * The SPA will call this callback several times for each zio - once | |
3888 | * for every physical child i/o (zio->io_phys_children times). This | |
3889 | * allows the DMU to monitor the progress of each logical i/o. For example, | |
3890 | * there may be 2 copies of an indirect block, or many fragments of a RAID-Z | |
3891 | * block. There may be a long delay before all copies/fragments are completed, | |
3892 | * so this callback allows us to retire dirty space gradually, as the physical | |
3893 | * i/os complete. | |
3894 | */ | |
3895 | /* ARGSUSED */ | |
3896 | static void | |
3897 | dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) | |
3898 | { | |
3899 | dmu_buf_impl_t *db = arg; | |
3900 | objset_t *os = db->db_objset; | |
3901 | dsl_pool_t *dp = dmu_objset_pool(os); | |
3902 | dbuf_dirty_record_t *dr; | |
3903 | int delta = 0; | |
3904 | ||
3905 | dr = db->db_data_pending; | |
3906 | ASSERT3U(dr->dr_txg, ==, zio->io_txg); | |
3907 | ||
3908 | /* | |
3909 | * The callback will be called io_phys_children times. Retire one | |
3910 | * portion of our dirty space each time we are called. Any rounding | |
3911 | * error will be cleaned up by dsl_pool_sync()'s call to | |
3912 | * dsl_pool_undirty_space(). | |
3913 | */ | |
3914 | delta = dr->dr_accounted / zio->io_phys_children; | |
3915 | dsl_pool_undirty_space(dp, delta, zio->io_txg); | |
3916 | } | |
3917 | ||
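| /* | |
| * Worked example (editorial, derived from the code above): a dirty | |
| * record accounting for 131072 bytes written with two physical | |
| * children (e.g. copies=2) retires 131072 / 2 == 65536 bytes per | |
| * callback. With three children the integer division retires only | |
| * 3 * 43690 == 131070 bytes; the 2-byte remainder is the rounding | |
| * error cleaned up by dsl_pool_sync()'s call to | |
| * dsl_pool_undirty_space(). | |
| */ | |
||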
3918 | /* ARGSUSED */ | |
3919 | static void | |
3920 | dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) | |
3921 | { | |
3922 | dmu_buf_impl_t *db = vdb; | |
3923 | blkptr_t *bp_orig = &zio->io_bp_orig; | |
3924 | blkptr_t *bp = db->db_blkptr; | |
3925 | objset_t *os = db->db_objset; | |
3926 | dmu_tx_t *tx = os->os_synctx; | |
3927 | dbuf_dirty_record_t **drp, *dr; | |
3928 | ||
3929 | ASSERT0(zio->io_error); | |
3930 | ASSERT(db->db_blkptr == bp); | |
3931 | ||
3932 | /* | |
3933 | * For nopwrites and rewrites we ensure that the bp matches our | |
3934 | * original and bypass all the accounting. | |
3935 | */ | |
3936 | if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { | |
3937 | ASSERT(BP_EQUAL(bp, bp_orig)); | |
3938 | } else { | |
3939 | dsl_dataset_t *ds = os->os_dsl_dataset; | |
3940 | (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); | |
3941 | dsl_dataset_block_born(ds, bp, tx); | |
3942 | } | |
3943 | ||
3944 | mutex_enter(&db->db_mtx); | |
3945 | ||
3946 | DBUF_VERIFY(db); | |
3947 | ||
3948 | drp = &db->db_last_dirty; | |
3949 | while ((dr = *drp) != db->db_data_pending) | |
3950 | drp = &dr->dr_next; | |
3951 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
3952 | ASSERT(dr->dr_dbuf == db); | |
3953 | ASSERT(dr->dr_next == NULL); | |
3954 | *drp = dr->dr_next; | |
3955 | ||
3956 | #ifdef ZFS_DEBUG | |
3957 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
3958 | dnode_t *dn; | |
3959 | ||
3960 | DB_DNODE_ENTER(db); | |
3961 | dn = DB_DNODE(db); | |
3962 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); | |
3963 | ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && | |
3964 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); | |
3965 | DB_DNODE_EXIT(db); | |
3966 | } | |
3967 | #endif | |
3968 | ||
3969 | if (db->db_level == 0) { | |
3970 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
3971 | ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); | |
3972 | if (db->db_state != DB_NOFILL) { | |
3973 | if (dr->dt.dl.dr_data != db->db_buf) | |
3974 | arc_buf_destroy(dr->dt.dl.dr_data, db); | |
3975 | } | |
3976 | } else { | |
3977 | dnode_t *dn; | |
3978 | ||
3979 | DB_DNODE_ENTER(db); | |
3980 | dn = DB_DNODE(db); | |
3981 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); | |
3982 | ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); | |
3983 | if (!BP_IS_HOLE(db->db_blkptr)) { | |
3984 | ASSERTV(int epbs = dn->dn_phys->dn_indblkshift - | |
3985 | SPA_BLKPTRSHIFT); | |
3986 | ASSERT3U(db->db_blkid, <=, | |
3987 | dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); | |
3988 | ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, | |
3989 | db->db.db_size); | |
3990 | } | |
3991 | DB_DNODE_EXIT(db); | |
3992 | mutex_destroy(&dr->dt.di.dr_mtx); | |
3993 | list_destroy(&dr->dt.di.dr_children); | |
3994 | } | |
3995 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
3996 | ||
3997 | cv_broadcast(&db->db_changed); | |
3998 | ASSERT(db->db_dirtycnt > 0); | |
3999 | db->db_dirtycnt -= 1; | |
4000 | db->db_data_pending = NULL; | |
4001 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); | |
4002 | } | |
4003 | ||
4004 | static void | |
4005 | dbuf_write_nofill_ready(zio_t *zio) | |
4006 | { | |
4007 | dbuf_write_ready(zio, NULL, zio->io_private); | |
4008 | } | |
4009 | ||
4010 | static void | |
4011 | dbuf_write_nofill_done(zio_t *zio) | |
4012 | { | |
4013 | dbuf_write_done(zio, NULL, zio->io_private); | |
4014 | } | |
4015 | ||
4016 | static void | |
4017 | dbuf_write_override_ready(zio_t *zio) | |
4018 | { | |
4019 | dbuf_dirty_record_t *dr = zio->io_private; | |
4020 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4021 | ||
4022 | dbuf_write_ready(zio, NULL, db); | |
4023 | } | |
4024 | ||
4025 | static void | |
4026 | dbuf_write_override_done(zio_t *zio) | |
4027 | { | |
4028 | dbuf_dirty_record_t *dr = zio->io_private; | |
4029 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4030 | blkptr_t *obp = &dr->dt.dl.dr_overridden_by; | |
4031 | ||
4032 | mutex_enter(&db->db_mtx); | |
4033 | if (!BP_EQUAL(zio->io_bp, obp)) { | |
4034 | if (!BP_IS_HOLE(obp)) | |
4035 | dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); | |
4036 | arc_release(dr->dt.dl.dr_data, db); | |
4037 | } | |
4038 | mutex_exit(&db->db_mtx); | |
4039 | ||
4040 | dbuf_write_done(zio, NULL, db); | |
4041 | ||
4042 | if (zio->io_abd != NULL) | |
4043 | abd_put(zio->io_abd); | |
4044 | } | |
4045 | ||
4046 | /* Issue I/O to commit a dirty buffer to disk. */ | |
4047 | static void | |
4048 | dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) | |
4049 | { | |
4050 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4051 | dnode_t *dn; | |
4052 | objset_t *os; | |
4053 | dmu_buf_impl_t *parent = db->db_parent; | |
4054 | uint64_t txg = tx->tx_txg; | |
4055 | zbookmark_phys_t zb; | |
4056 | zio_prop_t zp; | |
4057 | zio_t *zio; | |
4058 | int wp_flag = 0; | |
4059 | ||
4060 | ASSERT(dmu_tx_is_syncing(tx)); | |
4061 | ||
4062 | DB_DNODE_ENTER(db); | |
4063 | dn = DB_DNODE(db); | |
4064 | os = dn->dn_objset; | |
4065 | ||
4066 | if (db->db_state != DB_NOFILL) { | |
4067 | if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { | |
4068 | /* | |
4069 | * Private object buffers are released here rather | |
4070 | * than in dbuf_dirty() since they are only modified | |
4071 | * in the syncing context and we don't want the | |
4072 | * overhead of making multiple copies of the data. | |
4073 | */ | |
4074 | if (BP_IS_HOLE(db->db_blkptr)) { | |
4075 | arc_buf_thaw(data); | |
4076 | } else { | |
4077 | dbuf_release_bp(db); | |
4078 | } | |
4079 | } | |
4080 | } | |
4081 | ||
4082 | if (parent != dn->dn_dbuf) { | |
4083 | /* Our parent is an indirect block. */ | |
4084 | /* We have a dirty parent that has been scheduled for write. */ | |
4085 | ASSERT(parent && parent->db_data_pending); | |
4086 | /* Our parent's buffer is one level closer to the dnode. */ | |
4087 | ASSERT(db->db_level == parent->db_level-1); | |
4088 | /* | |
4089 | * We're about to modify our parent's db_data by modifying | |
4090 | * our block pointer, so the parent must be released. | |
4091 | */ | |
4092 | ASSERT(arc_released(parent->db_buf)); | |
4093 | zio = parent->db_data_pending->dr_zio; | |
4094 | } else { | |
4095 | /* Our parent is the dnode itself. */ | |
4096 | ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && | |
4097 | db->db_blkid != DMU_SPILL_BLKID) || | |
4098 | (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); | |
4099 | if (db->db_blkid != DMU_SPILL_BLKID) | |
4100 | ASSERT3P(db->db_blkptr, ==, | |
4101 | &dn->dn_phys->dn_blkptr[db->db_blkid]); | |
4102 | zio = dn->dn_zio; | |
4103 | } | |
4104 | ||
4105 | ASSERT(db->db_level == 0 || data == db->db_buf); | |
4106 | ASSERT3U(db->db_blkptr->blk_birth, <=, txg); | |
4107 | ASSERT(zio); | |
4108 | ||
4109 | SET_BOOKMARK(&zb, os->os_dsl_dataset ? | |
4110 | os->os_dsl_dataset->ds_object : DMU_META_OBJSET, | |
4111 | db->db.db_object, db->db_level, db->db_blkid); | |
4112 | ||
4113 | if (db->db_blkid == DMU_SPILL_BLKID) | |
4114 | wp_flag = WP_SPILL; | |
4115 | wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; | |
4116 | ||
4117 | dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); | |
4118 | DB_DNODE_EXIT(db); | |
4119 | ||
4120 | /* | |
4121 | * We copy the blkptr now (rather than when we instantiate the dirty | |
4122 | * record), because its value can change between open context and | |
4123 | * syncing context. We do not need to hold dn_struct_rwlock to read | |
4124 | * db_blkptr because we are in syncing context. | |
4125 | */ | |
4126 | dr->dr_bp_copy = *db->db_blkptr; | |
4127 | ||
4128 | if (db->db_level == 0 && | |
4129 | dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { | |
4130 | /* | |
4131 | * The BP for this block has been provided by open context | |
4132 | * (by dmu_sync() or dmu_buf_write_embedded()). | |
4133 | */ | |
4134 | abd_t *contents = (data != NULL) ? | |
4135 | abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; | |
4136 | ||
4137 | dr->dr_zio = zio_write(zio, os->os_spa, txg, | |
4138 | &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size, | |
4139 | &zp, dbuf_write_override_ready, NULL, NULL, | |
4140 | dbuf_write_override_done, | |
4141 | dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); | |
4142 | mutex_enter(&db->db_mtx); | |
4143 | dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; | |
4144 | zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, | |
4145 | dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); | |
4146 | mutex_exit(&db->db_mtx); | |
4147 | } else if (db->db_state == DB_NOFILL) { | |
4148 | ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || | |
4149 | zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); | |
4150 | dr->dr_zio = zio_write(zio, os->os_spa, txg, | |
4151 | &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, | |
4152 | dbuf_write_nofill_ready, NULL, NULL, | |
4153 | dbuf_write_nofill_done, db, | |
4154 | ZIO_PRIORITY_ASYNC_WRITE, | |
4155 | ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); | |
4156 | } else { | |
4157 | ASSERT(arc_released(data)); | |
4158 | ||
4159 | /* | |
4160 | * For indirect blocks, we want to setup the children | |
4161 | * ready callback so that we can properly handle an indirect | |
4162 | * block that only contains holes. | |
4163 | */ | |
4164 | arc_write_done_func_t *children_ready_cb = NULL; | |
4165 | if (db->db_level != 0) | |
4166 | children_ready_cb = dbuf_write_children_ready; | |
4167 | ||
4168 | dr->dr_zio = arc_write(zio, os->os_spa, txg, | |
4169 | &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), | |
4170 | &zp, dbuf_write_ready, | |
4171 | children_ready_cb, dbuf_write_physdone, | |
4172 | dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, | |
4173 | ZIO_FLAG_MUSTSUCCEED, &zb); | |
4174 | } | |
4175 | } | |
4176 | ||
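| /* | |
| * Editorial summary of the dispatch in dbuf_write() above: | |
| * | |
| *	level-0 and DR_OVERRIDDEN  -> zio_write() of the open-context | |
| *	                              data, then zio_write_override() | |
| *	state == DB_NOFILL         -> zio_write() with ZIO_FLAG_NODATA | |
| *	                              and no data buffer | |
| *	everything else            -> arc_write() of the ARC buffer, | |
| *	                              with dbuf_write_children_ready() | |
| *	                              set for indirect blocks | |
| */ | |
||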
4177 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
4178 | EXPORT_SYMBOL(dbuf_find); | |
4179 | EXPORT_SYMBOL(dbuf_is_metadata); | |
4180 | EXPORT_SYMBOL(dbuf_destroy); | |
4181 | EXPORT_SYMBOL(dbuf_loan_arcbuf); | |
4182 | EXPORT_SYMBOL(dbuf_whichblock); | |
4183 | EXPORT_SYMBOL(dbuf_read); | |
4184 | EXPORT_SYMBOL(dbuf_unoverride); | |
4185 | EXPORT_SYMBOL(dbuf_free_range); | |
4186 | EXPORT_SYMBOL(dbuf_new_size); | |
4187 | EXPORT_SYMBOL(dbuf_release_bp); | |
4188 | EXPORT_SYMBOL(dbuf_dirty); | |
4189 | EXPORT_SYMBOL(dmu_buf_will_change_crypt_params); | |
4190 | EXPORT_SYMBOL(dmu_buf_will_dirty); | |
4191 | EXPORT_SYMBOL(dmu_buf_will_not_fill); | |
4192 | EXPORT_SYMBOL(dmu_buf_will_fill); | |
4193 | EXPORT_SYMBOL(dmu_buf_fill_done); | |
4194 | EXPORT_SYMBOL(dmu_buf_rele); | |
4195 | EXPORT_SYMBOL(dbuf_assign_arcbuf); | |
4196 | EXPORT_SYMBOL(dbuf_prefetch); | |
4197 | EXPORT_SYMBOL(dbuf_hold_impl); | |
4198 | EXPORT_SYMBOL(dbuf_hold); | |
4199 | EXPORT_SYMBOL(dbuf_hold_level); | |
4200 | EXPORT_SYMBOL(dbuf_create_bonus); | |
4201 | EXPORT_SYMBOL(dbuf_spill_set_blksz); | |
4202 | EXPORT_SYMBOL(dbuf_rm_spill); | |
4203 | EXPORT_SYMBOL(dbuf_add_ref); | |
4204 | EXPORT_SYMBOL(dbuf_rele); | |
4205 | EXPORT_SYMBOL(dbuf_rele_and_unlock); | |
4206 | EXPORT_SYMBOL(dbuf_refcount); | |
4207 | EXPORT_SYMBOL(dbuf_sync_list); | |
4208 | EXPORT_SYMBOL(dmu_buf_set_user); | |
4209 | EXPORT_SYMBOL(dmu_buf_set_user_ie); | |
4210 | EXPORT_SYMBOL(dmu_buf_get_user); | |
4211 | EXPORT_SYMBOL(dmu_buf_get_blkptr); | |
4212 | ||
4213 | /* BEGIN CSTYLED */ | |
4214 | module_param(dbuf_cache_max_bytes, ulong, 0644); | |
4215 | MODULE_PARM_DESC(dbuf_cache_max_bytes, | |
4216 | "Maximum size in bytes of the dbuf cache."); | |
4217 | ||
4218 | module_param(dbuf_cache_hiwater_pct, uint, 0644); | |
4219 | MODULE_PARM_DESC(dbuf_cache_hiwater_pct, | |
4220 | "Percentage over dbuf_cache_max_bytes when dbufs must be evicted " | |
4221 | "directly."); | |
4222 | ||
4223 | module_param(dbuf_cache_lowater_pct, uint, 0644); | |
4224 | MODULE_PARM_DESC(dbuf_cache_lowater_pct, | |
4225 | "Percentage below dbuf_cache_max_bytes when the evict thread stops " | |
4226 | "evicting dbufs."); | |
4227 | ||
4228 | module_param(dbuf_cache_shift, int, 0644); | |
4229 | MODULE_PARM_DESC(dbuf_cache_shift, | |
4230 | "Set the size of the dbuf cache to a log2 fraction of arc size."); | |
4231 | /* END CSTYLED */ | |
4232 | #endif |