/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "cache_target_bytes", KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_total_evicts", KSTAT_DATA_UINT64 },
	{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
	{ "hash_hits", KSTAT_DATA_UINT64 },
	{ "hash_misses", KSTAT_DATA_UINT64 },
	{ "hash_collisions", KSTAT_DATA_UINT64 },
	{ "hash_elements", KSTAT_DATA_UINT64 },
	{ "hash_elements_max", KSTAT_DATA_UINT64 },
	{ "hash_chains", KSTAT_DATA_UINT64 },
	{ "hash_chain_max", KSTAT_DATA_UINT64 },
	{ "hash_insert_race", KSTAT_DATA_UINT64 },
	{ "hash_table_count", KSTAT_DATA_UINT64 },
	{ "hash_mutex_count", KSTAT_DATA_UINT64 },
	{ "metadata_cache_count", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
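/*
 * Lock-free maximum update: retry the compare-and-swap until either the
 * currently published value is already >= v or we successfully store v.
 */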
#define	DBUF_STAT_MAX(stat, v) {					\
	uint64_t _m;							\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;						\
}

static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                         |          |          |
 * |                                         |          |          |
 * |                                         |          |          |
 * |                                         |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;

static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	if (db->db_caching_status != DB_NO_CACHE) {
		/*
		 * This is a cached dbuf, so the size of the user data is
		 * included in its cached amount. We adjust it here because the
		 * user data has already been detached from the dbuf, and the
		 * sync functions are not supposed to touch it (the dbuf might
		 * not exist anymore by the time the sync functions run).
		 */
		uint64_t size = dbu->dbu_size;
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size, size, db);
		if (db->db_caching_status == DB_DBUF_CACHE)
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
	}

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		blkptr_t *bp = db->db_blkptr;
		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here, is the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
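
/*
 * For example, with a cache target of 100 MiB and the default
 * dbuf_cache_hiwater_pct and dbuf_cache_lowater_pct of 10, the high water
 * mark works out to 110 MiB and the low water mark to 90 MiB.
 */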

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
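		/*
		 * Worked example (hypothetical sizes): with 128K data blocks
		 * (datablkshift = 17), 128K indirect blocks (indblkshift = 17)
		 * and 128-byte block pointers (SPA_BLKPTRSHIFT = 7), each
		 * level-1 block covers 2^(17 - 7) = 1024 level-0 blocks, so
		 * the level-1 blkid is
		 * offset >> (17 + 1 * (17 - 7)) = offset >> 27.
		 */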
031d7c2f GN |
1315 | |
1316 | const unsigned exp = dn->dn_datablkshift + | |
1317 | level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT); | |
1318 | ||
1319 | if (exp >= 8 * sizeof (offset)) { | |
1320 | /* This only happens on the highest indirection level */ | |
1321 | ASSERT3U(level, ==, dn->dn_nlevels - 1); | |
1322 | return (0); | |
1323 | } | |
1324 | ||
1325 | ASSERT3U(exp, <, 8 * sizeof (offset)); | |
1326 | ||
1327 | return (offset >> exp); | |
34dc7c2f BB |
1328 | } else { |
1329 | ASSERT3U(offset, <, dn->dn_datablksz); | |
1330 | return (0); | |
1331 | } | |
1332 | } | |
1333 | ||
f664f1ee PD |
1334 | /* |
1335 | * This function is used to lock the parent of the provided dbuf. This should be | |
1336 | * used when modifying or reading db_blkptr. | |
1337 | */ | |
1338 | db_lock_type_t | |
a926aab9 | 1339 | dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag) |
f664f1ee PD |
1340 | { |
1341 | enum db_lock_type ret = DLT_NONE; | |
1342 | if (db->db_parent != NULL) { | |
1343 | rw_enter(&db->db_parent->db_rwlock, rw); | |
1344 | ret = DLT_PARENT; | |
1345 | } else if (dmu_objset_ds(db->db_objset) != NULL) { | |
1346 | rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw, | |
1347 | tag); | |
1348 | ret = DLT_OBJSET; | |
1349 | } | |
1350 | /* | |
1351 | * We only return a DLT_NONE lock when it's the top-most indirect block | |
1352 | * of the meta-dnode of the MOS. | |
1353 | */ | |
1354 | return (ret); | |
1355 | } | |
1356 | ||
1357 | /* | |
1358 | * We need to pass the lock type in because it's possible that the block will | |
1359 | * move from being the topmost indirect block in a dnode (and thus having no | |
1360 | * parent) to no longer being the topmost after an indirection increase. This | |
1361 | * would cause a panic if we didn't pass the lock type in. | |
1362 | */ | |
1363 | void | |
a926aab9 | 1364 | dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag) |
f664f1ee PD |
1365 | { |
1366 | if (type == DLT_PARENT) | |
1367 | rw_exit(&db->db_parent->db_rwlock); | |
1368 | else if (type == DLT_OBJSET) | |
1369 | rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag); | |
1370 | } | |
1371 | ||
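/*
 * Illustrative usage sketch (not part of the upstream code): callers take the
 * parent lock, access db_blkptr, and then release whichever lock type was
 * actually taken, e.g. the way dbuf_dirty() below wraps its ddt_prefetch() of
 * db_blkptr. The helper name here is hypothetical.
 */
#if 0	/* example only */
static boolean_t
dbuf_example_blkptr_is_hole(dmu_buf_impl_t *db)
{
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	boolean_t hole = (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr));
	dmu_buf_unlock_parent(db, dblt, FTAG);
	return (hole);
}
#endif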
34dc7c2f | 1372 | static void |
d4a72f23 TC |
1373 | dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, |
1374 | arc_buf_t *buf, void *vdb) | |
34dc7c2f | 1375 | { |
14e4e3cb | 1376 | (void) zb, (void) bp; |
34dc7c2f BB |
1377 | dmu_buf_impl_t *db = vdb; |
1378 | ||
1379 | mutex_enter(&db->db_mtx); | |
1380 | ASSERT3U(db->db_state, ==, DB_READ); | |
1381 | /* | |
1382 | * All reads are synchronous, so we must have a hold on the dbuf | |
1383 | */ | |
424fd7c3 | 1384 | ASSERT(zfs_refcount_count(&db->db_holds) > 0); |
34dc7c2f BB |
1385 | ASSERT(db->db_buf == NULL); |
1386 | ASSERT(db->db.db_data == NULL); | |
c3bd3fb4 TC |
1387 | if (buf == NULL) { |
1388 | /* i/o error */ | |
1389 | ASSERT(zio == NULL || zio->io_error != 0); | |
1390 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
1391 | ASSERT3P(db->db_buf, ==, NULL); | |
1392 | db->db_state = DB_UNCACHED; | |
8b3547a4 | 1393 | DTRACE_SET_STATE(db, "i/o error"); |
c3bd3fb4 TC |
1394 | } else if (db->db_level == 0 && db->db_freed_in_flight) { |
1395 | /* freed in flight */ | |
1396 | ASSERT(zio == NULL || zio->io_error == 0); | |
34dc7c2f | 1397 | arc_release(buf, db); |
861166b0 | 1398 | memset(buf->b_data, 0, db->db.db_size); |
34dc7c2f BB |
1399 | arc_buf_freeze(buf); |
1400 | db->db_freed_in_flight = FALSE; | |
1401 | dbuf_set_data(db, buf); | |
1402 | db->db_state = DB_CACHED; | |
8b3547a4 | 1403 | DTRACE_SET_STATE(db, "freed in flight"); |
c3bd3fb4 TC |
1404 | } else { |
1405 | /* success */ | |
1406 | ASSERT(zio == NULL || zio->io_error == 0); | |
34dc7c2f BB |
1407 | dbuf_set_data(db, buf); |
1408 | db->db_state = DB_CACHED; | |
8b3547a4 | 1409 | DTRACE_SET_STATE(db, "successful read"); |
34dc7c2f BB |
1410 | } |
1411 | cv_broadcast(&db->db_changed); | |
3d503a76 | 1412 | dbuf_rele_and_unlock(db, NULL, B_FALSE); |
34dc7c2f BB |
1413 | } |
1414 | ||
8b3547a4 MM |
1415 | /* |
1416 | * Shortcut for performing reads on bonus dbufs. Returns | |
1417 | * an error if we fail to verify the dnode associated with | |
1418 | * a decrypted block. Otherwise success. | |
1419 | */ | |
1420 | static int | |
1421 | dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags) | |
1422 | { | |
1423 | int bonuslen, max_bonuslen, err; | |
1424 | ||
1425 | err = dbuf_read_verify_dnode_crypt(db, flags); | |
1426 | if (err) | |
1427 | return (err); | |
1428 | ||
1429 | bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); | |
1430 | max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); | |
1431 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1432 | ASSERT(DB_DNODE_HELD(db)); | |
1433 | ASSERT3U(bonuslen, <=, db->db.db_size); | |
1434 | db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP); | |
1435 | arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); | |
1436 | if (bonuslen < max_bonuslen) | |
861166b0 | 1437 | memset(db->db.db_data, 0, max_bonuslen); |
8b3547a4 | 1438 | if (bonuslen) |
861166b0 | 1439 | memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen); |
8b3547a4 MM |
1440 | db->db_state = DB_CACHED; |
1441 | DTRACE_SET_STATE(db, "bonus buffer filled"); | |
1442 | return (0); | |
1443 | } | |
1444 | ||
1445 | static void | |
67a1b037 | 1446 | dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp) |
8b3547a4 MM |
1447 | { |
1448 | blkptr_t *bps = db->db.db_data; | |
1449 | uint32_t indbs = 1ULL << dn->dn_indblkshift; | |
1450 | int n_bps = indbs >> SPA_BLKPTRSHIFT; | |
1451 | ||
1452 | for (int i = 0; i < n_bps; i++) { | |
1453 | blkptr_t *bp = &bps[i]; | |
1454 | ||
67a1b037 PJD |
1455 | ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs); |
1456 | BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ? | |
1457 | dn->dn_datablksz : BP_GET_LSIZE(dbbp)); | |
1458 | BP_SET_TYPE(bp, BP_GET_TYPE(dbbp)); | |
1459 | BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1); | |
1460 | BP_SET_BIRTH(bp, dbbp->blk_birth, 0); | |
8b3547a4 MM |
1461 | } |
1462 | } | |
1463 | ||
1464 | /* | |
1465 | * Handle reads on dbufs that are holes, if necessary. This function | |
1466 | * requires that the dbuf's mutex is held. Returns success (0) if action | |
1467 | * was taken, ENOENT if no action was taken. | |
1468 | */ | |
1469 | static int | |
67a1b037 | 1470 | dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp) |
8b3547a4 MM |
1471 | { |
1472 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1473 | ||
67a1b037 | 1474 | int is_hole = bp == NULL || BP_IS_HOLE(bp); |
8b3547a4 MM |
1475 | /* |
1476 | * For level 0 blocks only, if the above check fails: | |
1477 | * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() | |
1478 | * processes the delete record and clears the bp while we are waiting | |
1479 | * for the dn_mtx (resulting in a "no" from block_freed). | |
1480 | */ | |
67a1b037 PJD |
1481 | if (!is_hole && db->db_level == 0) |
1482 | is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp); | |
8b3547a4 MM |
1483 | |
1484 | if (is_hole) { | |
1485 | dbuf_set_data(db, dbuf_alloc_arcbuf(db)); | |
861166b0 | 1486 | memset(db->db.db_data, 0, db->db.db_size); |
8b3547a4 | 1487 | |
67a1b037 PJD |
1488 | if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) && |
1489 | bp->blk_birth != 0) { | |
1490 | dbuf_handle_indirect_hole(db, dn, bp); | |
8b3547a4 MM |
1491 | } |
1492 | db->db_state = DB_CACHED; | |
1493 | DTRACE_SET_STATE(db, "hole read satisfied"); | |
1494 | return (0); | |
1495 | } | |
1496 | return (ENOENT); | |
1497 | } | |
69830602 TC |
1498 | |
1499 | /* | |
1500 | * This function ensures that, when doing a decrypting read of a block, | |
1501 | * we make sure we have decrypted the dnode associated with it. We must do | |
1502 | * this so that we ensure we are fully authenticating the checksum-of-MACs | |
1503 | * tree from the root of the objset down to this block. Indirect blocks are | |
1504 | * always verified against their secure checksum-of-MACs assuming that the | |
1505 | * dnode containing them is correct. Now that we are doing a decrypting read, | |
1506 | * we can be sure that the key is loaded and verify that assumption. This is | |
1507 | * especially important considering that we always read encrypted dnode | |
1508 | * blocks as raw data (without verifying their MACs) to start, and | |
1509 | * decrypt / authenticate them when we need to read an encrypted bonus buffer. | |
1510 | */ | |
1511 | static int | |
1512 | dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags) | |
1513 | { | |
1514 | int err = 0; | |
1515 | objset_t *os = db->db_objset; | |
1516 | arc_buf_t *dnode_abuf; | |
1517 | dnode_t *dn; | |
1518 | zbookmark_phys_t zb; | |
1519 | ||
1520 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1521 | ||
fd61b2ea AM |
1522 | if ((flags & DB_RF_NO_DECRYPT) != 0 || |
1523 | !os->os_encrypted || os->os_raw_receive) | |
69830602 TC |
1524 | return (0); |
1525 | ||
1526 | DB_DNODE_ENTER(db); | |
1527 | dn = DB_DNODE(db); | |
1528 | dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL; | |
1529 | ||
1530 | if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) { | |
1531 | DB_DNODE_EXIT(db); | |
1532 | return (0); | |
1533 | } | |
1534 | ||
1535 | SET_BOOKMARK(&zb, dmu_objset_id(os), | |
1536 | DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid); | |
1537 | err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE); | |
1538 | ||
1539 | /* | |
1540 | * An error code of EACCES tells us that the key is still not | |
1541 | * available. This is ok if we are only reading authenticated | |
1542 | * (and therefore non-encrypted) blocks. | |
1543 | */ | |
1544 | if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && | |
1545 | !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || | |
1546 | (db->db_blkid == DMU_BONUS_BLKID && | |
1547 | !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) | |
1548 | err = 0; | |
1549 | ||
69830602 TC |
1550 | DB_DNODE_EXIT(db); |
1551 | ||
1552 | return (err); | |
1553 | } | |
1554 | ||
f664f1ee PD |
1555 | /* |
1556 | * Drops db_mtx and the parent lock specified by dblt and tag before | |
1557 | * returning. | |
1558 | */ | |
5f6d0b6f | 1559 | static int |
f664f1ee | 1560 | dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags, |
a926aab9 | 1561 | db_lock_type_t dblt, const void *tag) |
34dc7c2f | 1562 | { |
572e2857 | 1563 | dnode_t *dn; |
5dbd68a3 | 1564 | zbookmark_phys_t zb; |
2a432414 | 1565 | uint32_t aflags = ARC_FLAG_NOWAIT; |
8b3547a4 | 1566 | int err, zio_flags; |
67a1b037 | 1567 | blkptr_t bp, *bpp; |
34dc7c2f | 1568 | |
572e2857 BB |
1569 | DB_DNODE_ENTER(db); |
1570 | dn = DB_DNODE(db); | |
424fd7c3 | 1571 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f | 1572 | ASSERT(MUTEX_HELD(&db->db_mtx)); |
67a1b037 | 1573 | ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); |
34dc7c2f | 1574 | ASSERT(db->db_buf == NULL); |
f664f1ee PD |
1575 | ASSERT(db->db_parent == NULL || |
1576 | RW_LOCK_HELD(&db->db_parent->db_rwlock)); | |
34dc7c2f | 1577 | |
428870ff | 1578 | if (db->db_blkid == DMU_BONUS_BLKID) { |
8b3547a4 MM |
1579 | err = dbuf_read_bonus(db, dn, flags); |
1580 | goto early_unlock; | |
34dc7c2f BB |
1581 | } |
1582 | ||
67a1b037 PJD |
1583 | if (db->db_state == DB_UNCACHED) { |
1584 | if (db->db_blkptr == NULL) { | |
1585 | bpp = NULL; | |
1586 | } else { | |
1587 | bp = *db->db_blkptr; | |
1588 | bpp = &bp; | |
1589 | } | |
1590 | } else { | |
67a1b037 PJD |
1591 | dbuf_dirty_record_t *dr; |
1592 | ||
1593 | ASSERT3S(db->db_state, ==, DB_NOFILL); | |
1594 | ||
555ef90c PJD |
1595 | /* |
1596 | * Block cloning: If we have a pending block clone, | |
1597 | * we don't want to read the underlying block, but the content | |
1598 | * of the block being cloned, so we have the most recent data. | |
1599 | */ | |
67a1b037 | 1600 | dr = list_head(&db->db_dirty_records); |
555ef90c | 1601 | if (dr == NULL || !dr->dt.dl.dr_brtwrite) { |
67a1b037 PJD |
1602 | err = EIO; |
1603 | goto early_unlock; | |
67a1b037 | 1604 | } |
555ef90c PJD |
1605 | bp = dr->dt.dl.dr_overridden_by; |
1606 | bpp = &bp; | |
67a1b037 PJD |
1607 | } |
1608 | ||
1609 | err = dbuf_read_hole(db, dn, bpp); | |
8b3547a4 MM |
1610 | if (err == 0) |
1611 | goto early_unlock; | |
34dc7c2f | 1612 | |
67a1b037 PJD |
1613 | ASSERT(bpp != NULL); |
1614 | ||
30af21b0 PD |
1615 | /* |
1616 | * Any attempt to read a redacted block should result in an error. This | |
1617 | * will never happen under normal conditions, but can be useful for | |
1618 | * debugging purposes. | |
1619 | */ | |
67a1b037 | 1620 | if (BP_IS_REDACTED(bpp)) { |
30af21b0 PD |
1621 | ASSERT(dsl_dataset_feature_is_active( |
1622 | db->db_objset->os_dsl_dataset, | |
1623 | SPA_FEATURE_REDACTED_DATASETS)); | |
8b3547a4 MM |
1624 | err = SET_ERROR(EIO); |
1625 | goto early_unlock; | |
30af21b0 PD |
1626 | } |
1627 | ||
370bbf66 TC |
1628 | SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), |
1629 | db->db.db_object, db->db_level, db->db_blkid); | |
1630 | ||
b5256303 TC |
1631 | /* |
1632 | * All bps of an encrypted os should have the encryption bit set. | |
1633 | * If this is not true it indicates tampering and we report an error. | |
1634 | */ | |
67a1b037 | 1635 | if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) { |
d96e2957 | 1636 | spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth); |
8b3547a4 MM |
1637 | err = SET_ERROR(EIO); |
1638 | goto early_unlock; | |
b5256303 TC |
1639 | } |
1640 | ||
69830602 | 1641 | err = dbuf_read_verify_dnode_crypt(db, flags); |
8b3547a4 MM |
1642 | if (err != 0) |
1643 | goto early_unlock; | |
69830602 TC |
1644 | |
1645 | DB_DNODE_EXIT(db); | |
1646 | ||
1647 | db->db_state = DB_READ; | |
8b3547a4 | 1648 | DTRACE_SET_STATE(db, "read issued"); |
69830602 TC |
1649 | mutex_exit(&db->db_mtx); |
1650 | ||
ed2f7ba0 AM |
1651 | if (!DBUF_IS_CACHEABLE(db)) |
1652 | aflags |= ARC_FLAG_UNCACHED; | |
1653 | else if (dbuf_is_l2cacheable(db)) | |
69830602 TC |
1654 | aflags |= ARC_FLAG_L2CACHE; |
1655 | ||
34dc7c2f | 1656 | dbuf_add_ref(db, NULL); |
b128c09f | 1657 | |
b5256303 TC |
1658 | zio_flags = (flags & DB_RF_CANFAIL) ? |
1659 | ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED; | |
1660 | ||
1661 | if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr)) | |
1662 | zio_flags |= ZIO_FLAG_RAW; | |
f664f1ee | 1663 | /* |
67a1b037 PJD |
1664 | * The zio layer will copy the provided blkptr later, but we have our |
1665 | * own copy so that we can release the parent's rwlock. We have to | |
1666 | * do that so that if dbuf_read_done is called synchronously (on | |
f664f1ee PD |
1667 | * an l1 cache hit) we don't acquire the db_mtx while holding the |
1668 | * parent's rwlock, which would be a lock ordering violation. | |
1669 | */ | |
f664f1ee | 1670 | dmu_buf_unlock_parent(db, dblt, tag); |
67a1b037 | 1671 | (void) arc_read(zio, db->db_objset->os_spa, bpp, |
b5256303 | 1672 | dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags, |
34dc7c2f | 1673 | &aflags, &zb); |
da8d5748 | 1674 | return (err); |
8b3547a4 MM |
1675 | early_unlock: |
1676 | DB_DNODE_EXIT(db); | |
1677 | mutex_exit(&db->db_mtx); | |
1678 | dmu_buf_unlock_parent(db, dblt, tag); | |
1679 | return (err); | |
34dc7c2f BB |
1680 | } |
1681 | ||
2aa34383 DK |
1682 | /* |
1683 | * This is our just-in-time copy function. It makes a copy of buffers that | |
1684 | * have been modified in a previous transaction group before we access them in | |
1685 | * the current active group. | |
1686 | * | |
1687 | * This function is used in three places: when we are dirtying a buffer for the | |
1688 | * first time in a txg, when we are freeing a range in a dnode that includes | |
1689 | * this buffer, and when we are accessing a buffer which was received compressed | |
1690 | * and later referenced in a WRITE_BYREF record. | |
1691 | * | |
1692 | * Note that when we are called from dbuf_free_range() we do not put a hold on | |
1693 | * the buffer, we just traverse the active dbuf list for the dnode. | |
1694 | */ | |
1695 | static void | |
1696 | dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) | |
1697 | { | |
cccbed9f | 1698 | dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); |
2aa34383 DK |
1699 | |
1700 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
1701 | ASSERT(db->db.db_data != NULL); | |
1702 | ASSERT(db->db_level == 0); | |
1703 | ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); | |
1704 | ||
1705 | if (dr == NULL || | |
1706 | (dr->dt.dl.dr_data != | |
1707 | ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) | |
1708 | return; | |
1709 | ||
1710 | /* | |
1711 | * If the last dirty record for this dbuf has not yet synced | |
1712 | * and it is referencing the dbuf data, either: | |
1713 | * reset the reference to point to a new copy, | |
1714 | * or (if there are no active holders) | |
1715 | * just null out the current db_data pointer. | |
1716 | */ | |
4807c0ba | 1717 | ASSERT3U(dr->dr_txg, >=, txg - 2); |
2aa34383 | 1718 | if (db->db_blkid == DMU_BONUS_BLKID) { |
2aa34383 DK |
1719 | dnode_t *dn = DB_DNODE(db); |
1720 | int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); | |
a3fd9d9e | 1721 | dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP); |
2aa34383 | 1722 | arc_space_consume(bonuslen, ARC_SPACE_BONUS); |
861166b0 | 1723 | memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen); |
424fd7c3 | 1724 | } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) { |
a81b8124 AR |
1725 | dnode_t *dn = DB_DNODE(db); |
1726 | int size = arc_buf_size(db->db_buf); | |
1727 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
1728 | spa_t *spa = db->db_objset->os_spa; | |
1729 | enum zio_compress compress_type = | |
1730 | arc_get_compression(db->db_buf); | |
1731 | uint8_t complevel = arc_get_complevel(db->db_buf); | |
1732 | ||
1733 | if (arc_is_encrypted(db->db_buf)) { | |
1734 | boolean_t byteorder; | |
1735 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
1736 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
1737 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
1738 | ||
1739 | arc_get_raw_params(db->db_buf, &byteorder, salt, | |
1740 | iv, mac); | |
1741 | dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db, | |
1742 | dmu_objset_id(dn->dn_objset), byteorder, salt, iv, | |
1743 | mac, dn->dn_type, size, arc_buf_lsize(db->db_buf), | |
1744 | compress_type, complevel); | |
1745 | } else if (compress_type != ZIO_COMPRESS_OFF) { | |
1746 | ASSERT3U(type, ==, ARC_BUFC_DATA); | |
1747 | dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, | |
1748 | size, arc_buf_lsize(db->db_buf), compress_type, | |
1749 | complevel); | |
1750 | } else { | |
1751 | dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); | |
1752 | } | |
861166b0 | 1753 | memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size); |
2aa34383 DK |
1754 | } else { |
1755 | db->db_buf = NULL; | |
1756 | dbuf_clear_data(db); | |
1757 | } | |
1758 | } | |
1759 | ||
34dc7c2f BB |
1760 | int |
1761 | dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) | |
1762 | { | |
1763 | int err = 0; | |
b0bc7a84 | 1764 | boolean_t prefetch; |
572e2857 | 1765 | dnode_t *dn; |
34dc7c2f BB |
1766 | |
1767 | /* | |
1768 | * We don't have to hold the mutex to check db_state because it | |
1769 | * can't be freed while we have a hold on the buffer. | |
1770 | */ | |
424fd7c3 | 1771 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f | 1772 | |
572e2857 BB |
1773 | DB_DNODE_ENTER(db); |
1774 | dn = DB_DNODE(db); | |
34dc7c2f | 1775 | |
428870ff | 1776 | prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && |
ed2f7ba0 | 1777 | (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL; |
34dc7c2f BB |
1778 | |
1779 | mutex_enter(&db->db_mtx); | |
ed2f7ba0 AM |
1780 | if (flags & DB_RF_PARTIAL_FIRST) |
1781 | db->db_partial_read = B_TRUE; | |
1782 | else if (!(flags & DB_RF_PARTIAL_MORE)) | |
1783 | db->db_partial_read = B_FALSE; | |
34dc7c2f | 1784 | if (db->db_state == DB_CACHED) { |
2aa34383 | 1785 | /* |
69830602 TC |
1786 | * Ensure that this block's dnode has been decrypted if |
1787 | * the caller has requested decrypted data. | |
2aa34383 | 1788 | */ |
69830602 TC |
1789 | err = dbuf_read_verify_dnode_crypt(db, flags); |
1790 | ||
1791 | /* | |
1792 | * If the arc buf is compressed or encrypted and the caller | |
1793 | * requested uncompressed data, we need to untransform it | |
1794 | * before returning. We also call arc_untransform() on any | |
1795 | * unauthenticated blocks, which will verify their MAC if | |
1796 | * the key is now available. | |
1797 | */ | |
1798 | if (err == 0 && db->db_buf != NULL && | |
1799 | (flags & DB_RF_NO_DECRYPT) == 0 && | |
b5256303 | 1800 | (arc_is_encrypted(db->db_buf) || |
69830602 | 1801 | arc_is_unauthenticated(db->db_buf) || |
b5256303 | 1802 | arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { |
fd61b2ea | 1803 | spa_t *spa = dn->dn_objset->os_spa; |
a2c2ed1b TC |
1804 | zbookmark_phys_t zb; |
1805 | ||
1806 | SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), | |
1807 | db->db.db_object, db->db_level, db->db_blkid); | |
b5256303 | 1808 | dbuf_fix_old_data(db, spa_syncing_txg(spa)); |
a2c2ed1b | 1809 | err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); |
2aa34383 DK |
1810 | dbuf_set_data(db, db->db_buf); |
1811 | } | |
34dc7c2f | 1812 | mutex_exit(&db->db_mtx); |
f664f1ee PD |
1813 | if (err == 0 && prefetch) { |
1814 | dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, | |
891568c9 | 1815 | B_FALSE, flags & DB_RF_HAVESTRUCT); |
f664f1ee | 1816 | } |
572e2857 | 1817 | DB_DNODE_EXIT(db); |
5e021f56 | 1818 | DBUF_STAT_BUMP(hash_hits); |
67a1b037 | 1819 | } else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) { |
a0043383 | 1820 | boolean_t need_wait = B_FALSE; |
572e2857 | 1821 | |
f664f1ee PD |
1822 | db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); |
1823 | ||
67a1b037 PJD |
1824 | if (zio == NULL && (db->db_state == DB_NOFILL || |
1825 | (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) { | |
fd61b2ea | 1826 | spa_t *spa = dn->dn_objset->os_spa; |
572e2857 | 1827 | zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); |
a0043383 MA |
1828 | need_wait = B_TRUE; |
1829 | } | |
f664f1ee PD |
1830 | err = dbuf_read_impl(db, zio, flags, dblt, FTAG); |
1831 | /* | |
1832 | * dbuf_read_impl has dropped db_mtx and our parent's rwlock | |
1833 | * for us | |
1834 | */ | |
1835 | if (!err && prefetch) { | |
1836 | dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, | |
891568c9 | 1837 | db->db_state != DB_CACHED, |
f664f1ee PD |
1838 | flags & DB_RF_HAVESTRUCT); |
1839 | } | |
34dc7c2f | 1840 | |
572e2857 | 1841 | DB_DNODE_EXIT(db); |
5e021f56 | 1842 | DBUF_STAT_BUMP(hash_misses); |
34dc7c2f | 1843 | |
5e7f3ace TC |
1844 | /* |
1845 | * If we created a zio_root we must execute it to avoid | |
1846 | * leaking it, even if it isn't attached to any work due | |
1847 | * to an error in dbuf_read_impl(). | |
1848 | */ | |
1849 | if (need_wait) { | |
1850 | if (err == 0) | |
1851 | err = zio_wait(zio); | |
1852 | else | |
1853 | VERIFY0(zio_wait(zio)); | |
1854 | } | |
34dc7c2f | 1855 | } else { |
e49f1e20 WA |
1856 | /* |
1857 | * Another reader came in while the dbuf was in flight | |
1858 | * between UNCACHED and CACHED. Either a writer will finish | |
1859 | * writing the buffer (sending the dbuf to CACHED) or the | |
1860 | * first reader's request will reach the read_done callback | |
1861 | * and send the dbuf to CACHED. Otherwise, a failure | |
1862 | * occurred and the dbuf went to UNCACHED. | |
1863 | */ | |
34dc7c2f | 1864 | mutex_exit(&db->db_mtx); |
f664f1ee PD |
1865 | if (prefetch) { |
1866 | dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, | |
891568c9 | 1867 | B_TRUE, flags & DB_RF_HAVESTRUCT); |
f664f1ee | 1868 | } |
572e2857 | 1869 | DB_DNODE_EXIT(db); |
5e021f56 | 1870 | DBUF_STAT_BUMP(hash_misses); |
34dc7c2f | 1871 | |
e49f1e20 | 1872 | /* Skip the wait per the caller's request. */ |
34dc7c2f | 1873 | if ((flags & DB_RF_NEVERWAIT) == 0) { |
28caa74b | 1874 | mutex_enter(&db->db_mtx); |
34dc7c2f BB |
1875 | while (db->db_state == DB_READ || |
1876 | db->db_state == DB_FILL) { | |
1877 | ASSERT(db->db_state == DB_READ || | |
1878 | (flags & DB_RF_HAVESTRUCT) == 0); | |
64dbba36 AL |
1879 | DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, |
1880 | db, zio_t *, zio); | |
34dc7c2f BB |
1881 | cv_wait(&db->db_changed, &db->db_mtx); |
1882 | } | |
1883 | if (db->db_state == DB_UNCACHED) | |
2e528b49 | 1884 | err = SET_ERROR(EIO); |
28caa74b | 1885 | mutex_exit(&db->db_mtx); |
34dc7c2f | 1886 | } |
34dc7c2f BB |
1887 | } |
1888 | ||
34dc7c2f BB |
1889 | return (err); |
1890 | } | |
1891 | ||
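/*
 * Illustrative usage sketch (not part of the upstream code): a synchronous
 * read of an already-held dbuf passes a NULL zio and lets dbuf_read() create
 * and wait on its own zio_root, the same way dbuf_dirty_lightweight() below
 * reads the parent indirect block.
 */
#if 0	/* example only */
	int err = dbuf_read(db, NULL, DB_RF_NOPREFETCH | DB_RF_CANFAIL);
	if (err != 0)
		return (err);
#endif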
1892 | static void | |
1893 | dbuf_noread(dmu_buf_impl_t *db) | |
1894 | { | |
424fd7c3 | 1895 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
428870ff | 1896 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
1897 | mutex_enter(&db->db_mtx); |
1898 | while (db->db_state == DB_READ || db->db_state == DB_FILL) | |
1899 | cv_wait(&db->db_changed, &db->db_mtx); | |
1900 | if (db->db_state == DB_UNCACHED) { | |
34dc7c2f BB |
1901 | ASSERT(db->db_buf == NULL); |
1902 | ASSERT(db->db.db_data == NULL); | |
8b3547a4 | 1903 | dbuf_set_data(db, dbuf_alloc_arcbuf(db)); |
34dc7c2f | 1904 | db->db_state = DB_FILL; |
8b3547a4 | 1905 | DTRACE_SET_STATE(db, "assigning filled buffer"); |
b128c09f | 1906 | } else if (db->db_state == DB_NOFILL) { |
0c66c32d | 1907 | dbuf_clear_data(db); |
34dc7c2f BB |
1908 | } else { |
1909 | ASSERT3U(db->db_state, ==, DB_CACHED); | |
1910 | } | |
1911 | mutex_exit(&db->db_mtx); | |
1912 | } | |
1913 | ||
34dc7c2f BB |
1914 | void |
1915 | dbuf_unoverride(dbuf_dirty_record_t *dr) | |
1916 | { | |
1917 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
428870ff | 1918 | blkptr_t *bp = &dr->dt.dl.dr_overridden_by; |
34dc7c2f BB |
1919 | uint64_t txg = dr->dr_txg; |
1920 | ||
1921 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
00710365 AS |
1922 | /* |
1923 | * This assert is valid because dmu_sync() expects to be called by | |
1924 | * a zilog's get_data while holding a range lock. This call only | |
1925 | * comes from dbuf_dirty() callers who must also hold a range lock. | |
1926 | */ | |
34dc7c2f BB |
1927 | ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); |
1928 | ASSERT(db->db_level == 0); | |
1929 | ||
428870ff | 1930 | if (db->db_blkid == DMU_BONUS_BLKID || |
34dc7c2f BB |
1931 | dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) |
1932 | return; | |
1933 | ||
428870ff BB |
1934 | ASSERT(db->db_data_pending != dr); |
1935 | ||
34dc7c2f | 1936 | /* free this block */ |
b0bc7a84 MG |
1937 | if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) |
1938 | zio_free(db->db_objset->os_spa, txg, bp); | |
428870ff | 1939 | |
86e115e2 AM |
1940 | if (dr->dt.dl.dr_brtwrite) { |
1941 | ASSERT0P(dr->dt.dl.dr_data); | |
1942 | dr->dt.dl.dr_data = db->db_buf; | |
1943 | } | |
34dc7c2f | 1944 | dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; |
03c6040b | 1945 | dr->dt.dl.dr_nopwrite = B_FALSE; |
555ef90c | 1946 | dr->dt.dl.dr_brtwrite = B_FALSE; |
0c03d21a | 1947 | dr->dt.dl.dr_has_raw_params = B_FALSE; |
03c6040b | 1948 | |
34dc7c2f BB |
1949 | /* |
1950 | * Release the already-written buffer, so we leave it in | |
1951 | * a consistent dirty state. Note that all callers are | |
1952 | * modifying the buffer, so they will immediately do | |
1953 | * another (redundant) arc_release(). Therefore, leave | |
1954 | * the buf thawed to save the effort of freezing & | |
1955 | * immediately re-thawing it. | |
1956 | */ | |
86e115e2 | 1957 | if (dr->dt.dl.dr_data) |
67a1b037 | 1958 | arc_release(dr->dt.dl.dr_data, db); |
34dc7c2f BB |
1959 | } |
1960 | ||
b128c09f BB |
1961 | /* |
1962 | * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 | |
1963 | * data blocks in the free range, so that any future readers will find | |
b0bc7a84 | 1964 | * empty blocks. |
b128c09f | 1965 | */ |
34dc7c2f | 1966 | void |
8951cb8d AR |
1967 | dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, |
1968 | dmu_tx_t *tx) | |
34dc7c2f | 1969 | { |
0c66c32d JG |
1970 | dmu_buf_impl_t *db_search; |
1971 | dmu_buf_impl_t *db, *db_next; | |
34dc7c2f | 1972 | uint64_t txg = tx->tx_txg; |
8951cb8d | 1973 | avl_index_t where; |
7b49bbc8 | 1974 | dbuf_dirty_record_t *dr; |
8951cb8d | 1975 | |
9c9531cb GM |
1976 | if (end_blkid > dn->dn_maxblkid && |
1977 | !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) | |
8951cb8d | 1978 | end_blkid = dn->dn_maxblkid; |
8e739b2c RE |
1979 | dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid, |
1980 | (u_longlong_t)end_blkid); | |
34dc7c2f | 1981 | |
0c66c32d | 1982 | db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); |
8951cb8d AR |
1983 | db_search->db_level = 0; |
1984 | db_search->db_blkid = start_blkid; | |
9925c28c | 1985 | db_search->db_state = DB_SEARCH; |
ea97f8ce | 1986 | |
b663a23d | 1987 | mutex_enter(&dn->dn_dbufs_mtx); |
8951cb8d AR |
1988 | db = avl_find(&dn->dn_dbufs, db_search, &where); |
1989 | ASSERT3P(db, ==, NULL); | |
9c9531cb | 1990 | |
8951cb8d AR |
1991 | db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); |
1992 | ||
1993 | for (; db != NULL; db = db_next) { | |
1994 | db_next = AVL_NEXT(&dn->dn_dbufs, db); | |
428870ff | 1995 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
b128c09f | 1996 | |
8951cb8d AR |
1997 | if (db->db_level != 0 || db->db_blkid > end_blkid) { |
1998 | break; | |
1999 | } | |
2000 | ASSERT3U(db->db_blkid, >=, start_blkid); | |
34dc7c2f BB |
2001 | |
2002 | /* found a level 0 buffer in the range */ | |
13fe0198 MA |
2003 | mutex_enter(&db->db_mtx); |
2004 | if (dbuf_undirty(db, tx)) { | |
2005 | /* mutex has been dropped and dbuf destroyed */ | |
34dc7c2f | 2006 | continue; |
13fe0198 | 2007 | } |
34dc7c2f | 2008 | |
34dc7c2f | 2009 | if (db->db_state == DB_UNCACHED || |
b128c09f | 2010 | db->db_state == DB_NOFILL || |
34dc7c2f BB |
2011 | db->db_state == DB_EVICTING) { |
2012 | ASSERT(db->db.db_data == NULL); | |
2013 | mutex_exit(&db->db_mtx); | |
2014 | continue; | |
2015 | } | |
2016 | if (db->db_state == DB_READ || db->db_state == DB_FILL) { | |
2017 | /* will be handled in dbuf_read_done or dbuf_rele */ | |
2018 | db->db_freed_in_flight = TRUE; | |
2019 | mutex_exit(&db->db_mtx); | |
2020 | continue; | |
2021 | } | |
424fd7c3 | 2022 | if (zfs_refcount_count(&db->db_holds) == 0) { |
34dc7c2f | 2023 | ASSERT(db->db_buf); |
d3c2ae1c | 2024 | dbuf_destroy(db); |
34dc7c2f BB |
2025 | continue; |
2026 | } | |
2027 | /* The dbuf is referenced */ | |
2028 | ||
7b49bbc8 MM |
2029 | dr = list_head(&db->db_dirty_records); |
2030 | if (dr != NULL) { | |
34dc7c2f BB |
2031 | if (dr->dr_txg == txg) { |
2032 | /* | |
2033 | * This buffer is "in-use", re-adjust the file | |
2034 | * size to reflect that this buffer may | |
2035 | * contain new data when we sync. | |
2036 | */ | |
428870ff BB |
2037 | if (db->db_blkid != DMU_SPILL_BLKID && |
2038 | db->db_blkid > dn->dn_maxblkid) | |
34dc7c2f BB |
2039 | dn->dn_maxblkid = db->db_blkid; |
2040 | dbuf_unoverride(dr); | |
2041 | } else { | |
2042 | /* | |
2043 | * This dbuf is not dirty in the open context. | |
2044 | * Either uncache it (if it's not referenced in | |
2045 | * the open context) or reset its contents to | |
2046 | * empty. | |
2047 | */ | |
2048 | dbuf_fix_old_data(db, txg); | |
2049 | } | |
2050 | } | |
2051 | /* clear the contents if its cached */ | |
2052 | if (db->db_state == DB_CACHED) { | |
2053 | ASSERT(db->db.db_data != NULL); | |
2054 | arc_release(db->db_buf, db); | |
f664f1ee | 2055 | rw_enter(&db->db_rwlock, RW_WRITER); |
861166b0 | 2056 | memset(db->db.db_data, 0, db->db.db_size); |
f664f1ee | 2057 | rw_exit(&db->db_rwlock); |
34dc7c2f BB |
2058 | arc_buf_freeze(db->db_buf); |
2059 | } | |
2060 | ||
2061 | mutex_exit(&db->db_mtx); | |
2062 | } | |
8951cb8d | 2063 | |
34dc7c2f | 2064 | mutex_exit(&dn->dn_dbufs_mtx); |
547df816 | 2065 | kmem_free(db_search, sizeof (dmu_buf_impl_t)); |
34dc7c2f BB |
2066 | } |
2067 | ||
34dc7c2f BB |
2068 | void |
2069 | dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) | |
2070 | { | |
28caa74b | 2071 | arc_buf_t *buf, *old_buf; |
cccbed9f | 2072 | dbuf_dirty_record_t *dr; |
34dc7c2f BB |
2073 | int osize = db->db.db_size; |
2074 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
572e2857 | 2075 | dnode_t *dn; |
34dc7c2f | 2076 | |
428870ff | 2077 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 2078 | |
572e2857 BB |
2079 | DB_DNODE_ENTER(db); |
2080 | dn = DB_DNODE(db); | |
2081 | ||
34dc7c2f BB |
2082 | /* |
2083 | * XXX we should be doing a dbuf_read, checking the return | |
2084 | * value and returning that up to our callers | |
2085 | */ | |
b0bc7a84 | 2086 | dmu_buf_will_dirty(&db->db, tx); |
34dc7c2f BB |
2087 | |
2088 | /* create the data buffer for the new block */ | |
2aa34383 | 2089 | buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); |
34dc7c2f BB |
2090 | |
2091 | /* copy old block data to the new block */ | |
28caa74b | 2092 | old_buf = db->db_buf; |
861166b0 | 2093 | memcpy(buf->b_data, old_buf->b_data, MIN(osize, size)); |
34dc7c2f BB |
2094 | /* zero the remainder */ |
2095 | if (size > osize) | |
861166b0 | 2096 | memset((uint8_t *)buf->b_data + osize, 0, size - osize); |
34dc7c2f BB |
2097 | |
2098 | mutex_enter(&db->db_mtx); | |
2099 | dbuf_set_data(db, buf); | |
28caa74b | 2100 | arc_buf_destroy(old_buf, db); |
34dc7c2f BB |
2101 | db->db.db_size = size; |
2102 | ||
cccbed9f | 2103 | dr = list_head(&db->db_dirty_records); |
7b49bbc8 MM |
2104 | /* dirty record added by dmu_buf_will_dirty() */ |
2105 | VERIFY(dr != NULL); | |
cccbed9f MM |
2106 | if (db->db_level == 0) |
2107 | dr->dt.dl.dr_data = buf; | |
2108 | ASSERT3U(dr->dr_txg, ==, tx->tx_txg); | |
2109 | ASSERT3U(dr->dr_accounted, ==, osize); | |
2110 | dr->dr_accounted = size; | |
34dc7c2f BB |
2111 | mutex_exit(&db->db_mtx); |
2112 | ||
3ec3bc21 | 2113 | dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); |
572e2857 | 2114 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
2115 | } |
2116 | ||
428870ff BB |
2117 | void |
2118 | dbuf_release_bp(dmu_buf_impl_t *db) | |
2119 | { | |
2a8ba608 | 2120 | objset_t *os __maybe_unused = db->db_objset; |
428870ff BB |
2121 | |
2122 | ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); | |
2123 | ASSERT(arc_released(os->os_phys_buf) || | |
2124 | list_link_active(&os->os_dsl_dataset->ds_synced_link)); | |
2125 | ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); | |
2126 | ||
294f6806 | 2127 | (void) arc_release(db->db_buf, db); |
428870ff BB |
2128 | } |
2129 | ||
5a28a973 MA |
2130 | /* |
2131 | * We already have a dirty record for this TXG, and we are being | |
2132 | * dirtied again. | |
2133 | */ | |
2134 | static void | |
2135 | dbuf_redirty(dbuf_dirty_record_t *dr) | |
2136 | { | |
2137 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
2138 | ||
2139 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
2140 | ||
2141 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { | |
2142 | /* | |
2143 | * If this buffer has already been written out, | |
2144 | * we now need to reset its state. | |
2145 | */ | |
2146 | dbuf_unoverride(dr); | |
2147 | if (db->db.db_object != DMU_META_DNODE_OBJECT && | |
2148 | db->db_state != DB_NOFILL) { | |
2149 | /* Already released on initial dirty, so just thaw. */ | |
2150 | ASSERT(arc_released(db->db_buf)); | |
2151 | arc_buf_thaw(db->db_buf); | |
2152 | } | |
2153 | } | |
2154 | } | |
2155 | ||
ba67d821 MA |
2156 | dbuf_dirty_record_t * |
2157 | dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) | |
2158 | { | |
2159 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
2160 | IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); | |
2161 | dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); | |
2162 | ASSERT(dn->dn_maxblkid >= blkid); | |
2163 | ||
2164 | dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); | |
2165 | list_link_init(&dr->dr_dirty_node); | |
2166 | list_link_init(&dr->dr_dbuf_node); | |
2167 | dr->dr_dnode = dn; | |
2168 | dr->dr_txg = tx->tx_txg; | |
2169 | dr->dt.dll.dr_blkid = blkid; | |
2170 | dr->dr_accounted = dn->dn_datablksz; | |
2171 | ||
2172 | /* | |
2173 | * There should not be any dbuf for the block that we're dirtying. | |
2174 | * Otherwise the buffer contents could be inconsistent between the | |
2175 | * dbuf and the lightweight dirty record. | |
2176 | */ | |
3236c0b8 RY |
2177 | ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid, |
2178 | NULL)); | |
ba67d821 MA |
2179 | |
2180 | mutex_enter(&dn->dn_mtx); | |
2181 | int txgoff = tx->tx_txg & TXG_MASK; | |
2182 | if (dn->dn_free_ranges[txgoff] != NULL) { | |
2183 | range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); | |
2184 | } | |
2185 | ||
2186 | if (dn->dn_nlevels == 1) { | |
2187 | ASSERT3U(blkid, <, dn->dn_nblkptr); | |
2188 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
2189 | mutex_exit(&dn->dn_mtx); | |
2190 | rw_exit(&dn->dn_struct_rwlock); | |
2191 | dnode_setdirty(dn, tx); | |
2192 | } else { | |
2193 | mutex_exit(&dn->dn_mtx); | |
2194 | ||
2195 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2196 | dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, | |
2197 | 1, blkid >> epbs, FTAG); | |
2198 | rw_exit(&dn->dn_struct_rwlock); | |
2199 | if (parent_db == NULL) { | |
2200 | kmem_free(dr, sizeof (*dr)); | |
2201 | return (NULL); | |
2202 | } | |
2203 | int err = dbuf_read(parent_db, NULL, | |
2204 | (DB_RF_NOPREFETCH | DB_RF_CANFAIL)); | |
2205 | if (err != 0) { | |
2206 | dbuf_rele(parent_db, FTAG); | |
2207 | kmem_free(dr, sizeof (*dr)); | |
2208 | return (NULL); | |
2209 | } | |
2210 | ||
2211 | dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); | |
2212 | dbuf_rele(parent_db, FTAG); | |
2213 | mutex_enter(&parent_dr->dt.di.dr_mtx); | |
2214 | ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); | |
2215 | list_insert_tail(&parent_dr->dt.di.dr_children, dr); | |
2216 | mutex_exit(&parent_dr->dt.di.dr_mtx); | |
2217 | dr->dr_parent = parent_dr; | |
2218 | } | |
2219 | ||
2220 | dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); | |
2221 | ||
2222 | return (dr); | |
2223 | } | |
2224 | ||
34dc7c2f BB |
2225 | dbuf_dirty_record_t * |
2226 | dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) | |
2227 | { | |
572e2857 BB |
2228 | dnode_t *dn; |
2229 | objset_t *os; | |
cccbed9f | 2230 | dbuf_dirty_record_t *dr, *dr_next, *dr_head; |
34dc7c2f | 2231 | int txgoff = tx->tx_txg & TXG_MASK; |
f664f1ee | 2232 | boolean_t drop_struct_rwlock = B_FALSE; |
34dc7c2f BB |
2233 | |
2234 | ASSERT(tx->tx_txg != 0); | |
424fd7c3 | 2235 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f BB |
2236 | DMU_TX_DIRTY_BUF(tx, db); |
2237 | ||
572e2857 BB |
2238 | DB_DNODE_ENTER(db); |
2239 | dn = DB_DNODE(db); | |
34dc7c2f BB |
2240 | /* |
2241 | * Shouldn't dirty a regular buffer in syncing context. Private | |
2242 | * objects may be dirtied in syncing context, but only if they | |
2243 | * were already pre-dirtied in open context. | |
34dc7c2f | 2244 | */ |
6d8da841 | 2245 | #ifdef ZFS_DEBUG |
cc9bb3e5 GM |
2246 | if (dn->dn_objset->os_dsl_dataset != NULL) { |
2247 | rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, | |
2248 | RW_READER, FTAG); | |
2249 | } | |
34dc7c2f BB |
2250 | ASSERT(!dmu_tx_is_syncing(tx) || |
2251 | BP_IS_HOLE(dn->dn_objset->os_rootbp) || | |
9babb374 BB |
2252 | DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
2253 | dn->dn_objset->os_dsl_dataset == NULL); | |
cc9bb3e5 GM |
2254 | if (dn->dn_objset->os_dsl_dataset != NULL) |
2255 | rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
2256 | #endif | |
34dc7c2f BB |
2257 | /* |
2258 | * We make this assert for private objects as well, but after we | |
2259 | * check if we're already dirty. They are allowed to re-dirty | |
2260 | * in syncing context. | |
2261 | */ | |
2262 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
2263 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
2264 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); | |
2265 | ||
2266 | mutex_enter(&db->db_mtx); | |
2267 | /* | |
2268 | * XXX make this true for indirects too? The problem is that | |
2269 | * transactions created with dmu_tx_create_assigned() from | |
2270 | * syncing context don't bother holding ahead. | |
2271 | */ | |
2272 | ASSERT(db->db_level != 0 || | |
b128c09f BB |
2273 | db->db_state == DB_CACHED || db->db_state == DB_FILL || |
2274 | db->db_state == DB_NOFILL); | |
34dc7c2f BB |
2275 | |
2276 | mutex_enter(&dn->dn_mtx); | |
28caa74b | 2277 | dnode_set_dirtyctx(dn, tx, db); |
edc1e713 TC |
2278 | if (tx->tx_txg > dn->dn_dirty_txg) |
2279 | dn->dn_dirty_txg = tx->tx_txg; | |
34dc7c2f BB |
2280 | mutex_exit(&dn->dn_mtx); |
2281 | ||
428870ff BB |
2282 | if (db->db_blkid == DMU_SPILL_BLKID) |
2283 | dn->dn_have_spill = B_TRUE; | |
2284 | ||
34dc7c2f BB |
2285 | /* |
2286 | * If this buffer is already dirty, we're done. | |
2287 | */ | |
cccbed9f MM |
2288 | dr_head = list_head(&db->db_dirty_records); |
2289 | ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || | |
34dc7c2f | 2290 | db->db.db_object == DMU_META_DNODE_OBJECT); |
cccbed9f MM |
2291 | dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); |
2292 | if (dr_next && dr_next->dr_txg == tx->tx_txg) { | |
572e2857 BB |
2293 | DB_DNODE_EXIT(db); |
2294 | ||
cccbed9f | 2295 | dbuf_redirty(dr_next); |
34dc7c2f | 2296 | mutex_exit(&db->db_mtx); |
cccbed9f | 2297 | return (dr_next); |
34dc7c2f BB |
2298 | } |
2299 | ||
2300 | /* | |
2301 | * Only valid if not already dirty. | |
2302 | */ | |
9babb374 BB |
2303 | ASSERT(dn->dn_object == 0 || |
2304 | dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == | |
34dc7c2f BB |
2305 | (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); |
2306 | ||
2307 | ASSERT3U(dn->dn_nlevels, >, db->db_level); | |
34dc7c2f BB |
2308 | |
2309 | /* | |
2310 | * We should only be dirtying in syncing context if it's the | |
9babb374 BB |
2311 | * mos or we're initializing the os or it's a special object. |
2312 | * However, we are allowed to dirty in syncing context provided | |
2313 | * we already dirtied it in open context. Hence we must make | |
2314 | * this assertion only if we're not already dirty. | |
34dc7c2f | 2315 | */ |
572e2857 | 2316 | os = dn->dn_objset; |
3b7f360c | 2317 | VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); |
6d8da841 | 2318 | #ifdef ZFS_DEBUG |
cc9bb3e5 GM |
2319 | if (dn->dn_objset->os_dsl_dataset != NULL) |
2320 | rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); | |
9babb374 BB |
2321 | ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || |
2322 | os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); | |
cc9bb3e5 GM |
2323 | if (dn->dn_objset->os_dsl_dataset != NULL) |
2324 | rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); | |
2325 | #endif | |
34dc7c2f BB |
2326 | ASSERT(db->db.db_size != 0); |
2327 | ||
2328 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); | |
2329 | ||
67a1b037 | 2330 | if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { |
3ec3bc21 | 2331 | dmu_objset_willuse_space(os, db->db.db_size, tx); |
34dc7c2f BB |
2332 | } |
2333 | ||
2334 | /* | |
2335 | * If this buffer is dirty in an old transaction group we need | |
2336 | * to make a copy of it so that the changes we make in this | |
2337 | * transaction group won't leak out when we sync the older txg. | |
2338 | */ | |
79c76d5b | 2339 | dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); |
98f72a53 | 2340 | list_link_init(&dr->dr_dirty_node); |
cccbed9f | 2341 | list_link_init(&dr->dr_dbuf_node); |
ba67d821 | 2342 | dr->dr_dnode = dn; |
34dc7c2f BB |
2343 | if (db->db_level == 0) { |
2344 | void *data_old = db->db_buf; | |
2345 | ||
b128c09f | 2346 | if (db->db_state != DB_NOFILL) { |
428870ff | 2347 | if (db->db_blkid == DMU_BONUS_BLKID) { |
b128c09f BB |
2348 | dbuf_fix_old_data(db, tx->tx_txg); |
2349 | data_old = db->db.db_data; | |
2350 | } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { | |
2351 | /* | |
2352 | * Release the data buffer from the cache so | |
2353 | * that we can modify it without impacting | |
2354 | * possible other users of this cached data | |
2355 | * block. Note that indirect blocks and | |
2356 | * private objects are not released until the | |
2357 | * syncing state (since they are only modified | |
2358 | * then). | |
2359 | */ | |
2360 | arc_release(db->db_buf, db); | |
2361 | dbuf_fix_old_data(db, tx->tx_txg); | |
2362 | data_old = db->db_buf; | |
2363 | } | |
2364 | ASSERT(data_old != NULL); | |
34dc7c2f | 2365 | } |
34dc7c2f BB |
2366 | dr->dt.dl.dr_data = data_old; |
2367 | } else { | |
448d7aaa | 2368 | mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); |
34dc7c2f BB |
2369 | list_create(&dr->dt.di.dr_children, |
2370 | sizeof (dbuf_dirty_record_t), | |
2371 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
2372 | } | |
67a1b037 | 2373 | if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { |
e8b96c60 | 2374 | dr->dr_accounted = db->db.db_size; |
67a1b037 | 2375 | } |
34dc7c2f BB |
2376 | dr->dr_dbuf = db; |
2377 | dr->dr_txg = tx->tx_txg; | |
cccbed9f | 2378 | list_insert_before(&db->db_dirty_records, dr_next, dr); |
34dc7c2f BB |
2379 | |
2380 | /* | |
2381 | * We could have been freed_in_flight between the dbuf_noread | |
2382 | * and dbuf_dirty. We win, as though the dbuf_noread() had | |
2383 | * happened after the free. | |
2384 | */ | |
428870ff BB |
2385 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && |
2386 | db->db_blkid != DMU_SPILL_BLKID) { | |
34dc7c2f | 2387 | mutex_enter(&dn->dn_mtx); |
9bd274dd MA |
2388 | if (dn->dn_free_ranges[txgoff] != NULL) { |
2389 | range_tree_clear(dn->dn_free_ranges[txgoff], | |
2390 | db->db_blkid, 1); | |
2391 | } | |
34dc7c2f BB |
2392 | mutex_exit(&dn->dn_mtx); |
2393 | db->db_freed_in_flight = FALSE; | |
2394 | } | |
2395 | ||
2396 | /* | |
2397 | * This buffer is now part of this txg | |
2398 | */ | |
2399 | dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); | |
2400 | db->db_dirtycnt += 1; | |
2401 | ASSERT3U(db->db_dirtycnt, <=, 3); | |
2402 | ||
2403 | mutex_exit(&db->db_mtx); | |
2404 | ||
428870ff BB |
2405 | if (db->db_blkid == DMU_BONUS_BLKID || |
2406 | db->db_blkid == DMU_SPILL_BLKID) { | |
34dc7c2f BB |
2407 | mutex_enter(&dn->dn_mtx); |
2408 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
2409 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
2410 | mutex_exit(&dn->dn_mtx); | |
2411 | dnode_setdirty(dn, tx); | |
572e2857 | 2412 | DB_DNODE_EXIT(db); |
34dc7c2f | 2413 | return (dr); |
98ace739 MA |
2414 | } |
2415 | ||
98ace739 MA |
2416 | if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { |
2417 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
f664f1ee PD |
2418 | drop_struct_rwlock = B_TRUE; |
2419 | } | |
2420 | ||
2421 | /* | |
2422 | * If we are overwriting a dedup BP, then unless it is snapshotted, | |
2423 | * when we get to syncing context we will need to decrement its | |
2424 | * refcount in the DDT. Prefetch the relevant DDT block so that | |
2425 | * syncing context won't have to wait for the i/o. | |
2426 | */ | |
2427 | if (db->db_blkptr != NULL) { | |
2428 | db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); | |
2429 | ddt_prefetch(os->os_spa, db->db_blkptr); | |
2430 | dmu_buf_unlock_parent(db, dblt, FTAG); | |
98ace739 MA |
2431 | } |
2432 | ||
2ade4a99 MA |
2433 | /* |
2434 | * We need to hold the dn_struct_rwlock to make this assertion, | |
2435 | * because it protects dn_phys / dn_next_nlevels from changing. | |
2436 | */ | |
2437 | ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || | |
2438 | dn->dn_phys->dn_nlevels > db->db_level || | |
2439 | dn->dn_next_nlevels[txgoff] > db->db_level || | |
2440 | dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || | |
2441 | dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); | |
2442 | ||
34dc7c2f | 2443 | |
b128c09f | 2444 | if (db->db_level == 0) { |
69830602 TC |
2445 | ASSERT(!db->db_objset->os_raw_receive || |
2446 | dn->dn_maxblkid >= db->db_blkid); | |
369aa501 | 2447 | dnode_new_blkid(dn, db->db_blkid, tx, |
f664f1ee | 2448 | drop_struct_rwlock, B_FALSE); |
b128c09f BB |
2449 | ASSERT(dn->dn_maxblkid >= db->db_blkid); |
2450 | } | |
2451 | ||
34dc7c2f BB |
2452 | if (db->db_level+1 < dn->dn_nlevels) { |
2453 | dmu_buf_impl_t *parent = db->db_parent; | |
2454 | dbuf_dirty_record_t *di; | |
2455 | int parent_held = FALSE; | |
2456 | ||
2457 | if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { | |
2458 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
f664f1ee | 2459 | parent = dbuf_hold_level(dn, db->db_level + 1, |
34dc7c2f | 2460 | db->db_blkid >> epbs, FTAG); |
428870ff | 2461 | ASSERT(parent != NULL); |
34dc7c2f BB |
2462 | parent_held = TRUE; |
2463 | } | |
f664f1ee | 2464 | if (drop_struct_rwlock) |
34dc7c2f | 2465 | rw_exit(&dn->dn_struct_rwlock); |
f664f1ee | 2466 | ASSERT3U(db->db_level + 1, ==, parent->db_level); |
34dc7c2f BB |
2467 | di = dbuf_dirty(parent, tx); |
2468 | if (parent_held) | |
2469 | dbuf_rele(parent, FTAG); | |
2470 | ||
2471 | mutex_enter(&db->db_mtx); | |
e8b96c60 MA |
2472 | /* |
2473 | * Since we've dropped the mutex, it's possible that | |
2474 | * dbuf_undirty() might have changed this out from under us. | |
2475 | */ | |
cccbed9f | 2476 | if (list_head(&db->db_dirty_records) == dr || |
34dc7c2f BB |
2477 | dn->dn_object == DMU_META_DNODE_OBJECT) { |
2478 | mutex_enter(&di->dt.di.dr_mtx); | |
2479 | ASSERT3U(di->dr_txg, ==, tx->tx_txg); | |
2480 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
2481 | list_insert_tail(&di->dt.di.dr_children, dr); | |
2482 | mutex_exit(&di->dt.di.dr_mtx); | |
2483 | dr->dr_parent = di; | |
2484 | } | |
2485 | mutex_exit(&db->db_mtx); | |
2486 | } else { | |
f664f1ee | 2487 | ASSERT(db->db_level + 1 == dn->dn_nlevels); |
34dc7c2f | 2488 | ASSERT(db->db_blkid < dn->dn_nblkptr); |
572e2857 | 2489 | ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
2490 | mutex_enter(&dn->dn_mtx); |
2491 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
2492 | list_insert_tail(&dn->dn_dirty_records[txgoff], dr); | |
2493 | mutex_exit(&dn->dn_mtx); | |
f664f1ee | 2494 | if (drop_struct_rwlock) |
34dc7c2f BB |
2495 | rw_exit(&dn->dn_struct_rwlock); |
2496 | } | |
2497 | ||
2498 | dnode_setdirty(dn, tx); | |
572e2857 | 2499 | DB_DNODE_EXIT(db); |
34dc7c2f BB |
2500 | return (dr); |
2501 | } | |
2502 | ||
fa3922df MM |
2503 | static void |
2504 | dbuf_undirty_bonus(dbuf_dirty_record_t *dr) | |
2505 | { | |
2506 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
2507 | ||
2508 | if (dr->dt.dl.dr_data != db->db.db_data) { | |
ba67d821 | 2509 | struct dnode *dn = dr->dr_dnode; |
fa3922df MM |
2510 | int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); |
2511 | ||
2512 | kmem_free(dr->dt.dl.dr_data, max_bonuslen); | |
2513 | arc_space_return(max_bonuslen, ARC_SPACE_BONUS); | |
2514 | } | |
2515 | db->db_data_pending = NULL; | |
2516 | ASSERT(list_next(&db->db_dirty_records, dr) == NULL); | |
2517 | list_remove(&db->db_dirty_records, dr); | |
2518 | if (dr->dr_dbuf->db_level != 0) { | |
2519 | mutex_destroy(&dr->dt.di.dr_mtx); | |
2520 | list_destroy(&dr->dt.di.dr_children); | |
2521 | } | |
2522 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
2523 | ASSERT3U(db->db_dirtycnt, >, 0); | |
2524 | db->db_dirtycnt -= 1; | |
2525 | } | |
2526 | ||
13fe0198 | 2527 | /* |
e49f1e20 WA |
2528 | * Undirty a buffer in the transaction group referenced by the given |
2529 | * transaction. Return whether this evicted the dbuf. | |
13fe0198 | 2530 | */ |
ce0e1cc4 | 2531 | boolean_t |
34dc7c2f BB |
2532 | dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
2533 | { | |
34dc7c2f | 2534 | uint64_t txg = tx->tx_txg; |
67a1b037 | 2535 | boolean_t brtwrite; |
34dc7c2f BB |
2536 | |
2537 | ASSERT(txg != 0); | |
4bda3bd0 MA |
2538 | |
2539 | /* | |
2540 | * Due to our use of dn_nlevels below, this can only be called | |
2541 | * in open context, unless we are operating on the MOS. | |
2542 | * From syncing context, dn_nlevels may be different from the | |
2543 | * dn_nlevels used when dbuf was dirtied. | |
2544 | */ | |
2545 | ASSERT(db->db_objset == | |
2546 | dmu_objset_pool(db->db_objset)->dp_meta_objset || | |
2547 | txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); | |
428870ff | 2548 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
13fe0198 MA |
2549 | ASSERT0(db->db_level); |
2550 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
34dc7c2f | 2551 | |
34dc7c2f BB |
2552 | /* |
2553 | * If this buffer is not dirty, we're done. | |
2554 | */ | |
ba67d821 | 2555 | dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); |
cccbed9f | 2556 | if (dr == NULL) |
13fe0198 | 2557 | return (B_FALSE); |
428870ff | 2558 | ASSERT(dr->dr_dbuf == db); |
34dc7c2f | 2559 | |
67a1b037 PJD |
2560 | brtwrite = dr->dt.dl.dr_brtwrite; |
2561 | if (brtwrite) { | |
2562 | /* | |
2563 | * We are freeing a block that we cloned in the same | |
2564 | * transaction group. | |
2565 | */ | |
2566 | brt_pending_remove(dmu_objset_spa(db->db_objset), | |
2567 | &dr->dt.dl.dr_overridden_by, tx); | |
2568 | } | |
2569 | ||
ba67d821 | 2570 | dnode_t *dn = dr->dr_dnode; |
572e2857 | 2571 | |
34dc7c2f BB |
2572 | dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); |
2573 | ||
2574 | ASSERT(db->db.db_size != 0); | |
2575 | ||
4bda3bd0 MA |
2576 | dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), |
2577 | dr->dr_accounted, txg); | |
34dc7c2f | 2578 | |
cccbed9f | 2579 | list_remove(&db->db_dirty_records, dr); |
34dc7c2f | 2580 | |
ef3c1dea GR |
2581 | /* |
2582 | * Note that there are three places in dbuf_dirty() | |
2583 | * where this dirty record may be put on a list. | |
2584 | * Make sure to do a list_remove corresponding to | |
2585 | * every one of those list_insert calls. | |
2586 | */ | |
34dc7c2f BB |
2587 | if (dr->dr_parent) { |
2588 | mutex_enter(&dr->dr_parent->dt.di.dr_mtx); | |
2589 | list_remove(&dr->dr_parent->dt.di.dr_children, dr); | |
2590 | mutex_exit(&dr->dr_parent->dt.di.dr_mtx); | |
ef3c1dea | 2591 | } else if (db->db_blkid == DMU_SPILL_BLKID || |
4bda3bd0 | 2592 | db->db_level + 1 == dn->dn_nlevels) { |
b128c09f | 2593 | ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); |
34dc7c2f BB |
2594 | mutex_enter(&dn->dn_mtx); |
2595 | list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); | |
2596 | mutex_exit(&dn->dn_mtx); | |
2597 | } | |
2598 | ||
67a1b037 | 2599 | if (db->db_state != DB_NOFILL && !brtwrite) { |
13fe0198 | 2600 | dbuf_unoverride(dr); |
34dc7c2f | 2601 | |
34dc7c2f | 2602 | ASSERT(db->db_buf != NULL); |
13fe0198 MA |
2603 | ASSERT(dr->dt.dl.dr_data != NULL); |
2604 | if (dr->dt.dl.dr_data != db->db_buf) | |
d3c2ae1c | 2605 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
34dc7c2f | 2606 | } |
58c4aa00 | 2607 | |
34dc7c2f BB |
2608 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); |
2609 | ||
2610 | ASSERT(db->db_dirtycnt > 0); | |
2611 | db->db_dirtycnt -= 1; | |
2612 | ||
424fd7c3 | 2613 | if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { |
67a1b037 PJD |
2614 | ASSERT(db->db_state == DB_NOFILL || brtwrite || |
2615 | arc_released(db->db_buf)); | |
d3c2ae1c | 2616 | dbuf_destroy(db); |
13fe0198 | 2617 | return (B_TRUE); |
34dc7c2f BB |
2618 | } |
2619 | ||
13fe0198 | 2620 | return (B_FALSE); |
34dc7c2f BB |
2621 | } |
2622 | ||
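/*
 * Illustrative usage sketch (not part of the upstream code): dbuf_undirty()
 * is called with db_mtx held; a B_TRUE return means the dbuf was destroyed
 * and the mutex was already dropped, as in dbuf_free_range() above.
 */
#if 0	/* example only */
	mutex_enter(&db->db_mtx);
	if (dbuf_undirty(db, tx)) {
		/* mutex has been dropped and dbuf destroyed */
		return;
	}
	mutex_exit(&db->db_mtx);
#endif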
b5256303 TC |
2623 | static void |
2624 | dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) | |
34dc7c2f | 2625 | { |
b0bc7a84 | 2626 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
555ef90c | 2627 | boolean_t undirty = B_FALSE; |
34dc7c2f BB |
2628 | |
2629 | ASSERT(tx->tx_txg != 0); | |
424fd7c3 | 2630 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f | 2631 | |
5a28a973 | 2632 | /* |
e1cfd73f | 2633 | * Quick check for dirtiness. For already dirty blocks, this |
5a28a973 MA |
2634 | * reduces runtime of this function by >90%, and overall performance |
2635 | * by 50% for some workloads (e.g. file deletion with indirect blocks | |
2636 | * cached). | |
2637 | */ | |
2638 | mutex_enter(&db->db_mtx); | |
2639 | ||
555ef90c | 2640 | if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) { |
cccbed9f | 2641 | dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); |
5a28a973 MA |
2642 | /* |
2643 | * It's possible that it is already dirty but not cached, | |
2644 | * because there are some calls to dbuf_dirty() that don't | |
2645 | * go through dmu_buf_will_dirty(). | |
2646 | */ | |
cccbed9f | 2647 | if (dr != NULL) { |
555ef90c PJD |
2648 | if (dr->dt.dl.dr_brtwrite) { |
2649 | /* | |
2650 | * Block cloning: If we are dirtying a cloned | |
2651 | * block, we cannot simply redirty it, because | |
2652 | * this dr has no data associated with it. | |
2653 | * We will go through a full undirtying below, | |
2654 | * before dirtying it again. | |
2655 | */ | |
2656 | undirty = B_TRUE; | |
2657 | } else { | |
2658 | /* This dbuf is already dirty and cached. */ | |
2659 | dbuf_redirty(dr); | |
2660 | mutex_exit(&db->db_mtx); | |
2661 | return; | |
2662 | } | |
5a28a973 MA |
2663 | } |
2664 | } | |
2665 | mutex_exit(&db->db_mtx); | |
2666 | ||
572e2857 BB |
2667 | DB_DNODE_ENTER(db); |
2668 | if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) | |
b5256303 | 2669 | flags |= DB_RF_HAVESTRUCT; |
572e2857 | 2670 | DB_DNODE_EXIT(db); |
555ef90c PJD |
2671 | |
2672 | /* | |
2673 | * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we | |
2674 | * want to make sure dbuf_read() will read the pending cloned block and | |
2675 | * not the underlying block that is being replaced. dbuf_undirty() will | |
2676 | * do dbuf_unoverride(), so we will end up with cloned block content, | |
2677 | * without overridden BP. | |
2678 | */ | |
b5256303 | 2679 | (void) dbuf_read(db, NULL, flags); |
555ef90c PJD |
2680 | if (undirty) { |
2681 | mutex_enter(&db->db_mtx); | |
2682 | VERIFY(!dbuf_undirty(db, tx)); | |
2683 | mutex_exit(&db->db_mtx); | |
2684 | } | |
34dc7c2f BB |
2685 | (void) dbuf_dirty(db, tx); |
2686 | } | |
2687 | ||
b5256303 TC |
2688 | void |
2689 | dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2690 | { | |
2691 | dmu_buf_will_dirty_impl(db_fake, | |
2692 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); | |
2693 | } | |
2694 | ||
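/*
 * Illustrative usage sketch (not part of the upstream code): DMU consumers
 * call dmu_buf_will_dirty() with an assigned transaction before modifying
 * db_data in place, as dbuf_new_size() does above.
 */
#if 0	/* example only */
	dmu_buf_will_dirty(db_fake, tx);
	memset(db_fake->db_data, 0, db_fake->db_size);
#endif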
a73e8fdb PZ |
2695 | boolean_t |
2696 | dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2697 | { | |
2698 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
cccbed9f | 2699 | dbuf_dirty_record_t *dr; |
a73e8fdb PZ |
2700 | |
2701 | mutex_enter(&db->db_mtx); | |
cccbed9f | 2702 | dr = dbuf_find_dirty_eq(db, tx->tx_txg); |
a73e8fdb | 2703 | mutex_exit(&db->db_mtx); |
cccbed9f | 2704 | return (dr != NULL); |
a73e8fdb PZ |
2705 | } |
2706 | ||
555ef90c PJD |
2707 | void |
2708 | dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2709 | { | |
2710 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2711 | ||
2712 | /* | |
2713 | * Block cloning: We are going to clone into this block, so undirty | |
2714 | * modifications done to this block so far in this txg. This includes | |
2715 | * writes and clones into this block. | |
2716 | */ | |
2717 | mutex_enter(&db->db_mtx); | |
688514e4 | 2718 | DBUF_VERIFY(db); |
555ef90c | 2719 | VERIFY(!dbuf_undirty(db, tx)); |
5f1479d9 | 2720 | ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg)); |
555ef90c PJD |
2721 | if (db->db_buf != NULL) { |
2722 | arc_buf_destroy(db->db_buf, db); | |
2723 | db->db_buf = NULL; | |
688514e4 | 2724 | dbuf_clear_data(db); |
555ef90c | 2725 | } |
688514e4 RN |
2726 | |
2727 | db->db_state = DB_NOFILL; | |
2728 | DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone"); | |
2729 | ||
2730 | DBUF_VERIFY(db); | |
555ef90c PJD |
2731 | mutex_exit(&db->db_mtx); |
2732 | ||
688514e4 RN |
2733 | dbuf_noread(db); |
2734 | (void) dbuf_dirty(db, tx); | |
555ef90c PJD |
2735 | } |
2736 | ||
b128c09f BB |
2737 | void |
2738 | dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) | |
2739 | { | |
2740 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2741 | ||
ad0a5546 | 2742 | mutex_enter(&db->db_mtx); |
b128c09f | 2743 | db->db_state = DB_NOFILL; |
8b3547a4 | 2744 | DTRACE_SET_STATE(db, "allocating NOFILL buffer"); |
ad0a5546 | 2745 | mutex_exit(&db->db_mtx); |
555ef90c PJD |
2746 | |
2747 | dbuf_noread(db); | |
2748 | (void) dbuf_dirty(db, tx); | |
b128c09f BB |
2749 | } |
2750 | ||
34dc7c2f | 2751 | void |
9b1677fb | 2752 | dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail) |
34dc7c2f BB |
2753 | { |
2754 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2755 | ||
428870ff | 2756 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2757 | ASSERT(tx->tx_txg != 0); |
2758 | ASSERT(db->db_level == 0); | |
424fd7c3 | 2759 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f BB |
2760 | |
2761 | ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || | |
2762 | dmu_tx_private_ok(tx)); | |
2763 | ||
ad0a5546 | 2764 | mutex_enter(&db->db_mtx); |
555ef90c PJD |
2765 | if (db->db_state == DB_NOFILL) { |
2766 | /* | |
2767 | * Block cloning: We will be completely overwriting a block | |
2768 | * cloned in this transaction group, so let's undirty the | |
2769 | * pending clone and mark the block as uncached. This will be | |
9b1677fb AM |
2770 | * as if the clone was never done. But if the fill can fail |
2771 | * we should have a way to return back to the cloned data. | |
555ef90c | 2772 | */ |
9b1677fb AM |
2773 | if (canfail && dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) { |
2774 | mutex_exit(&db->db_mtx); | |
2775 | dmu_buf_will_dirty(db_fake, tx); | |
2776 | return; | |
2777 | } | |
555ef90c | 2778 | VERIFY(!dbuf_undirty(db, tx)); |
555ef90c PJD |
2779 | db->db_state = DB_UNCACHED; |
2780 | } | |
ad0a5546 | 2781 | mutex_exit(&db->db_mtx); |
555ef90c | 2782 | |
34dc7c2f BB |
2783 | dbuf_noread(db); |
2784 | (void) dbuf_dirty(db, tx); | |
2785 | } | |
2786 | ||
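[Editor's note: a hedged sketch, not part of dbuf.c, of the will_fill/fill_done pairing with canfail set, roughly as a writer copying from a failable source (e.g. user memory) would use it. The example_ name and the copyfn callback are illustrative assumptions; dmu_buf_fill_done() is the routine defined later in this file.]

/* Caller-supplied copy routine that may fail (hypothetical). */
typedef int (*fill_copy_fn_t)(void *dst, const void *src, size_t len);

/*
 * Illustrative only: completely overwrite a held buffer, tolerating a
 * failed copy by letting dmu_buf_fill_done() roll the fill back.
 */
static int
example_fill_buf(dmu_buf_t *db, dmu_tx_t *tx, const void *src,
    fill_copy_fn_t copyfn)
{
	int err;

	dmu_buf_will_fill(db, tx, B_TRUE);		/* canfail == B_TRUE */
	err = copyfn(db->db_data, src, db->db_size);	/* may fail */
	(void) dmu_buf_fill_done(db, tx, err != 0);	/* undo on failure */
	return (err);
}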
b5256303 TC |
2787 | /* |
2788 | * This function is effectively the same as dmu_buf_will_dirty(), but | |
0c03d21a MA |
2789 | * indicates the caller expects raw encrypted data in the db, and provides |
2790 | * the crypt params (byteorder, salt, iv, mac) which should be stored in the | |
2791 | * blkptr_t when this dbuf is written. This is only used for blocks of | |
2792 | * dnodes, during raw receive. | |
b5256303 TC |
2793 | */ |
2794 | void | |
0c03d21a MA |
2795 | dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, |
2796 | const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) | |
b5256303 TC |
2797 | { |
2798 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
2799 | dbuf_dirty_record_t *dr; | |
2800 | ||
0c03d21a MA |
2801 | /* |
2802 | * dr_has_raw_params is only processed for blocks of dnodes | |
2803 | * (see dbuf_sync_dnode_leaf_crypt()). | |
2804 | */ | |
2805 | ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); | |
2806 | ASSERT3U(db->db_level, ==, 0); | |
2807 | ASSERT(db->db_objset->os_raw_receive); | |
2808 | ||
b5256303 TC |
2809 | dmu_buf_will_dirty_impl(db_fake, |
2810 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); | |
2811 | ||
cccbed9f | 2812 | dr = dbuf_find_dirty_eq(db, tx->tx_txg); |
b5256303 TC |
2813 | |
2814 | ASSERT3P(dr, !=, NULL); | |
0c03d21a MA |
2815 | |
2816 | dr->dt.dl.dr_has_raw_params = B_TRUE; | |
2817 | dr->dt.dl.dr_byteorder = byteorder; | |
861166b0 AZ |
2818 | memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN); |
2819 | memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN); | |
2820 | memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN); | |
b5256303 TC |
2821 | } |
2822 | ||
30af21b0 PD |
2823 | static void |
2824 | dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) | |
2825 | { | |
2826 | struct dirty_leaf *dl; | |
cccbed9f | 2827 | dbuf_dirty_record_t *dr; |
30af21b0 | 2828 | |
cccbed9f | 2829 | dr = list_head(&db->db_dirty_records); |
a6ccb36b | 2830 | ASSERT3P(dr, !=, NULL); |
cccbed9f MM |
2831 | ASSERT3U(dr->dr_txg, ==, tx->tx_txg); |
2832 | dl = &dr->dt.dl; | |
30af21b0 PD |
2833 | dl->dr_overridden_by = *bp; |
2834 | dl->dr_override_state = DR_OVERRIDDEN; | |
cccbed9f | 2835 | dl->dr_overridden_by.blk_birth = dr->dr_txg; |
30af21b0 PD |
2836 | } |
2837 | ||
9b1677fb AM |
2838 | boolean_t |
2839 | dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed) | |
34dc7c2f | 2840 | { |
14e4e3cb | 2841 | (void) tx; |
30af21b0 | 2842 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; |
34dc7c2f BB |
2843 | mutex_enter(&db->db_mtx); |
2844 | DBUF_VERIFY(db); | |
2845 | ||
9b1677fb | 2846 | if (db->db_state == DB_FILL) { |
34dc7c2f | 2847 | if (db->db_level == 0 && db->db_freed_in_flight) { |
428870ff | 2848 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
2849 | /* we were freed while filling */ |
2850 | /* XXX dbuf_undirty? */ | |
861166b0 | 2851 | memset(db->db.db_data, 0, db->db.db_size); |
34dc7c2f | 2852 | db->db_freed_in_flight = FALSE; |
9b1677fb | 2853 | db->db_state = DB_CACHED; |
8b3547a4 MM |
2854 | DTRACE_SET_STATE(db, |
2855 | "fill done handling freed in flight"); | |
9b1677fb AM |
2856 | failed = B_FALSE; |
2857 | } else if (failed) { | |
2858 | VERIFY(!dbuf_undirty(db, tx)); | |
2859 | db->db_buf = NULL; | |
2860 | dbuf_clear_data(db); | |
2861 | DTRACE_SET_STATE(db, "fill failed"); | |
8b3547a4 | 2862 | } else { |
9b1677fb | 2863 | db->db_state = DB_CACHED; |
8b3547a4 | 2864 | DTRACE_SET_STATE(db, "fill done"); |
34dc7c2f | 2865 | } |
34dc7c2f | 2866 | cv_broadcast(&db->db_changed); |
9b1677fb AM |
2867 | } else { |
2868 | db->db_state = DB_CACHED; | |
2869 | failed = B_FALSE; | |
34dc7c2f BB |
2870 | } |
2871 | mutex_exit(&db->db_mtx); | |
9b1677fb | 2872 | return (failed); |
34dc7c2f BB |
2873 | } |
2874 | ||
9b67f605 MA |
2875 | void |
2876 | dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, | |
2877 | bp_embedded_type_t etype, enum zio_compress comp, | |
2878 | int uncompressed_size, int compressed_size, int byteorder, | |
2879 | dmu_tx_t *tx) | |
2880 | { | |
2881 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
2882 | struct dirty_leaf *dl; | |
2883 | dmu_object_type_t type; | |
cccbed9f | 2884 | dbuf_dirty_record_t *dr; |
9b67f605 | 2885 | |
241b5415 MA |
2886 | if (etype == BP_EMBEDDED_TYPE_DATA) { |
2887 | ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), | |
2888 | SPA_FEATURE_EMBEDDED_DATA)); | |
2889 | } | |
2890 | ||
9b67f605 MA |
2891 | DB_DNODE_ENTER(db); |
2892 | type = DB_DNODE(db)->dn_type; | |
2893 | DB_DNODE_EXIT(db); | |
2894 | ||
2895 | ASSERT0(db->db_level); | |
2896 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); | |
2897 | ||
2898 | dmu_buf_will_not_fill(dbuf, tx); | |
2899 | ||
cccbed9f | 2900 | dr = list_head(&db->db_dirty_records); |
a6ccb36b | 2901 | ASSERT3P(dr, !=, NULL); |
cccbed9f MM |
2902 | ASSERT3U(dr->dr_txg, ==, tx->tx_txg); |
2903 | dl = &dr->dt.dl; | |
9b67f605 MA |
2904 | encode_embedded_bp_compressed(&dl->dr_overridden_by, |
2905 | data, comp, uncompressed_size, compressed_size); | |
2906 | BPE_SET_ETYPE(&dl->dr_overridden_by, etype); | |
2907 | BP_SET_TYPE(&dl->dr_overridden_by, type); | |
2908 | BP_SET_LEVEL(&dl->dr_overridden_by, 0); | |
2909 | BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); | |
2910 | ||
2911 | dl->dr_override_state = DR_OVERRIDDEN; | |
cccbed9f | 2912 | dl->dr_overridden_by.blk_birth = dr->dr_txg; |
9b67f605 MA |
2913 | } |
2914 | ||
30af21b0 PD |
2915 | void |
2916 | dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) | |
2917 | { | |
2918 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | |
2919 | dmu_object_type_t type; | |
2920 | ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, | |
2921 | SPA_FEATURE_REDACTED_DATASETS)); | |
2922 | ||
2923 | DB_DNODE_ENTER(db); | |
2924 | type = DB_DNODE(db)->dn_type; | |
2925 | DB_DNODE_EXIT(db); | |
2926 | ||
2927 | ASSERT0(db->db_level); | |
2928 | dmu_buf_will_not_fill(dbuf, tx); | |
2929 | ||
2930 | blkptr_t bp = { { { {0} } } }; | |
2931 | BP_SET_TYPE(&bp, type); | |
2932 | BP_SET_LEVEL(&bp, 0); | |
2933 | BP_SET_BIRTH(&bp, tx->tx_txg, 0); | |
2934 | BP_SET_REDACTED(&bp); | |
2935 | BPE_SET_LSIZE(&bp, dbuf->db_size); | |
2936 | ||
2937 | dbuf_override_impl(db, &bp, tx); | |
2938 | } | |
2939 | ||
9babb374 BB |
2940 | /* |
2941 | * Directly assign a provided arc buf to a given dbuf if it's not referenced | |
2942 | * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. | |
2943 | */ | |
2944 | void | |
2945 | dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) | |
2946 | { | |
424fd7c3 | 2947 | ASSERT(!zfs_refcount_is_zero(&db->db_holds)); |
428870ff | 2948 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
9babb374 | 2949 | ASSERT(db->db_level == 0); |
2aa34383 | 2950 | ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); |
9babb374 | 2951 | ASSERT(buf != NULL); |
caf9dd20 | 2952 | ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); |
9babb374 BB |
2953 | ASSERT(tx->tx_txg != 0); |
2954 | ||
2955 | arc_return_buf(buf, db); | |
2956 | ASSERT(arc_released(buf)); | |
2957 | ||
2958 | mutex_enter(&db->db_mtx); | |
2959 | ||
2960 | while (db->db_state == DB_READ || db->db_state == DB_FILL) | |
2961 | cv_wait(&db->db_changed, &db->db_mtx); | |
2962 | ||
86063d90 AM |
2963 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED || |
2964 | db->db_state == DB_NOFILL); | |
9babb374 BB |
2965 | |
2966 | if (db->db_state == DB_CACHED && | |
424fd7c3 | 2967 | zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { |
440a3eb9 TC |
2968 | /* |
2969 | * In practice, we will never have a case where we have an | |
2970 | * encrypted arc buffer while additional holds exist on the | |
2971 | * dbuf. We don't handle this here so we simply assert that | |
2972 | * fact instead. | |
2973 | */ | |
2974 | ASSERT(!arc_is_encrypted(buf)); | |
9babb374 BB |
2975 | mutex_exit(&db->db_mtx); |
2976 | (void) dbuf_dirty(db, tx); | |
861166b0 | 2977 | memcpy(db->db.db_data, buf->b_data, db->db.db_size); |
d3c2ae1c | 2978 | arc_buf_destroy(buf, db); |
9babb374 BB |
2979 | return; |
2980 | } | |
2981 | ||
2982 | if (db->db_state == DB_CACHED) { | |
cccbed9f | 2983 | dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); |
9babb374 BB |
2984 | |
2985 | ASSERT(db->db_buf != NULL); | |
2986 | if (dr != NULL && dr->dr_txg == tx->tx_txg) { | |
2987 | ASSERT(dr->dt.dl.dr_data == db->db_buf); | |
440a3eb9 | 2988 | |
9babb374 BB |
2989 | if (!arc_released(db->db_buf)) { |
2990 | ASSERT(dr->dt.dl.dr_override_state == | |
2991 | DR_OVERRIDDEN); | |
2992 | arc_release(db->db_buf, db); | |
2993 | } | |
2994 | dr->dt.dl.dr_data = buf; | |
d3c2ae1c | 2995 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2996 | } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { |
2997 | arc_release(db->db_buf, db); | |
d3c2ae1c | 2998 | arc_buf_destroy(db->db_buf, db); |
9babb374 BB |
2999 | } |
3000 | db->db_buf = NULL; | |
86063d90 AM |
3001 | } else if (db->db_state == DB_NOFILL) { |
3002 | /* | |
3003 | * We will be completely replacing the cloned block. In case | |
3004 | * it was cloned in this transaction group, let's undirty the | |
3005 | * pending clone and mark the block as uncached. This will be | |
3006 | * as if the clone was never done. | |
3007 | */ | |
3008 | VERIFY(!dbuf_undirty(db, tx)); | |
3009 | db->db_state = DB_UNCACHED; | |
9babb374 BB |
3010 | } |
3011 | ASSERT(db->db_buf == NULL); | |
3012 | dbuf_set_data(db, buf); | |
3013 | db->db_state = DB_FILL; | |
8b3547a4 | 3014 | DTRACE_SET_STATE(db, "filling assigned arcbuf"); |
9babb374 BB |
3015 | mutex_exit(&db->db_mtx); |
3016 | (void) dbuf_dirty(db, tx); | |
9b1677fb | 3017 | dmu_buf_fill_done(&db->db, tx, B_FALSE); |
9babb374 BB |
3018 | } |
3019 | ||
34dc7c2f | 3020 | void |
d3c2ae1c | 3021 | dbuf_destroy(dmu_buf_impl_t *db) |
34dc7c2f | 3022 | { |
572e2857 | 3023 | dnode_t *dn; |
34dc7c2f | 3024 | dmu_buf_impl_t *parent = db->db_parent; |
572e2857 | 3025 | dmu_buf_impl_t *dndb; |
34dc7c2f BB |
3026 | |
3027 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
424fd7c3 | 3028 | ASSERT(zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f | 3029 | |
d3c2ae1c GW |
3030 | if (db->db_buf != NULL) { |
3031 | arc_buf_destroy(db->db_buf, db); | |
3032 | db->db_buf = NULL; | |
3033 | } | |
34dc7c2f | 3034 | |
d3c2ae1c GW |
3035 | if (db->db_blkid == DMU_BONUS_BLKID) { |
3036 | int slots = DB_DNODE(db)->dn_num_slots; | |
3037 | int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); | |
b5256303 TC |
3038 | if (db->db.db_data != NULL) { |
3039 | kmem_free(db->db.db_data, bonuslen); | |
3040 | arc_space_return(bonuslen, ARC_SPACE_BONUS); | |
3041 | db->db_state = DB_UNCACHED; | |
8b3547a4 | 3042 | DTRACE_SET_STATE(db, "buffer cleared"); |
b5256303 | 3043 | } |
34dc7c2f BB |
3044 | } |
3045 | ||
d3c2ae1c GW |
3046 | dbuf_clear_data(db); |
3047 | ||
3048 | if (multilist_link_active(&db->db_cache_link)) { | |
2e5dc449 MA |
3049 | ASSERT(db->db_caching_status == DB_DBUF_CACHE || |
3050 | db->db_caching_status == DB_DBUF_METADATA_CACHE); | |
3051 | ||
ffdf019c | 3052 | multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); |
92dc4ad8 RN |
3053 | |
3054 | ASSERT0(dmu_buf_user_size(&db->db)); | |
424fd7c3 | 3055 | (void) zfs_refcount_remove_many( |
2e5dc449 | 3056 | &dbuf_caches[db->db_caching_status].size, |
d3c2ae1c | 3057 | db->db.db_size, db); |
2e5dc449 MA |
3058 | |
3059 | if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { | |
3060 | DBUF_STAT_BUMPDOWN(metadata_cache_count); | |
3061 | } else { | |
3062 | DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); | |
3063 | DBUF_STAT_BUMPDOWN(cache_count); | |
3064 | DBUF_STAT_DECR(cache_levels_bytes[db->db_level], | |
3065 | db->db.db_size); | |
3066 | } | |
3067 | db->db_caching_status = DB_NO_CACHE; | |
d3c2ae1c GW |
3068 | } |
3069 | ||
b128c09f | 3070 | ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); |
34dc7c2f | 3071 | ASSERT(db->db_data_pending == NULL); |
28caa74b | 3072 | ASSERT(list_is_empty(&db->db_dirty_records)); |
34dc7c2f BB |
3073 | |
3074 | db->db_state = DB_EVICTING; | |
8b3547a4 | 3075 | DTRACE_SET_STATE(db, "buffer eviction started"); |
34dc7c2f BB |
3076 | db->db_blkptr = NULL; |
3077 | ||
d3c2ae1c GW |
3078 | /* |
3079 | * Now that db_state is DB_EVICTING, nobody else can find this via | |
3080 | * the hash table. We can now drop db_mtx, which allows us to | |
3081 | * acquire the dn_dbufs_mtx. | |
3082 | */ | |
3083 | mutex_exit(&db->db_mtx); | |
3084 | ||
572e2857 BB |
3085 | DB_DNODE_ENTER(db); |
3086 | dn = DB_DNODE(db); | |
3087 | dndb = dn->dn_dbuf; | |
d3c2ae1c GW |
3088 | if (db->db_blkid != DMU_BONUS_BLKID) { |
3089 | boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); | |
3090 | if (needlock) | |
a649768a JD |
3091 | mutex_enter_nested(&dn->dn_dbufs_mtx, |
3092 | NESTED_SINGLE); | |
8951cb8d | 3093 | avl_remove(&dn->dn_dbufs, db); |
572e2857 BB |
3094 | membar_producer(); |
3095 | DB_DNODE_EXIT(db); | |
d3c2ae1c GW |
3096 | if (needlock) |
3097 | mutex_exit(&dn->dn_dbufs_mtx); | |
572e2857 BB |
3098 | /* |
3099 | * Decrementing the dbuf count means that the hold corresponding | |
3100 | * to the removed dbuf is no longer discounted in dnode_move(), | |
3101 | * so the dnode cannot be moved until after we release the hold. | |
3102 | * The membar_producer() ensures visibility of the decremented | |
3103 | * value in dnode_move(), since DB_DNODE_EXIT doesn't actually | |
3104 | * release any lock. | |
3105 | */ | |
1fac63e5 | 3106 | mutex_enter(&dn->dn_mtx); |
3d503a76 | 3107 | dnode_rele_and_unlock(dn, db, B_TRUE); |
572e2857 | 3108 | db->db_dnode_handle = NULL; |
d3c2ae1c GW |
3109 | |
3110 | dbuf_hash_remove(db); | |
572e2857 BB |
3111 | } else { |
3112 | DB_DNODE_EXIT(db); | |
34dc7c2f BB |
3113 | } |
3114 | ||
424fd7c3 | 3115 | ASSERT(zfs_refcount_is_zero(&db->db_holds)); |
34dc7c2f | 3116 | |
d3c2ae1c GW |
3117 | db->db_parent = NULL; |
3118 | ||
3119 | ASSERT(db->db_buf == NULL); | |
3120 | ASSERT(db->db.db_data == NULL); | |
3121 | ASSERT(db->db_hash_next == NULL); | |
3122 | ASSERT(db->db_blkptr == NULL); | |
3123 | ASSERT(db->db_data_pending == NULL); | |
2e5dc449 | 3124 | ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); |
d3c2ae1c GW |
3125 | ASSERT(!multilist_link_active(&db->db_cache_link)); |
3126 | ||
34dc7c2f | 3127 | /* |
572e2857 | 3128 | * If this dbuf is referenced from an indirect dbuf, |
34dc7c2f BB |
3129 | * decrement the ref count on the indirect dbuf. |
3130 | */ | |
1fac63e5 MA |
3131 | if (parent && parent != dndb) { |
3132 | mutex_enter(&parent->db_mtx); | |
3d503a76 | 3133 | dbuf_rele_and_unlock(parent, db, B_TRUE); |
1fac63e5 | 3134 | } |
c175f5eb BB |
3135 | |
3136 | kmem_cache_free(dbuf_kmem_cache, db); | |
3137 | arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); | |
34dc7c2f BB |
3138 | } |
3139 | ||
fcff0f35 PD |
3140 | /* |
3141 | * Note: While bpp will always be updated if the function returns success, | |
3142 | * parentp will not be updated if the dnode does not have dn_dbuf filled in; | |
9c5167d1 | 3143 | * this happens when the dnode is the meta-dnode, or {user|group|project}used |
fcff0f35 PD |
3144 | * object. |
3145 | */ | |
bf701a83 BB |
3146 | __attribute__((always_inline)) |
3147 | static inline int | |
34dc7c2f | 3148 | dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, |
adb726eb | 3149 | dmu_buf_impl_t **parentp, blkptr_t **bpp) |
34dc7c2f | 3150 | { |
34dc7c2f BB |
3151 | *parentp = NULL; |
3152 | *bpp = NULL; | |
3153 | ||
428870ff BB |
3154 | ASSERT(blkid != DMU_BONUS_BLKID); |
3155 | ||
3156 | if (blkid == DMU_SPILL_BLKID) { | |
3157 | mutex_enter(&dn->dn_mtx); | |
3158 | if (dn->dn_have_spill && | |
3159 | (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
50c957f7 | 3160 | *bpp = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
3161 | else |
3162 | *bpp = NULL; | |
3163 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
3164 | *parentp = dn->dn_dbuf; | |
3165 | mutex_exit(&dn->dn_mtx); | |
3166 | return (0); | |
3167 | } | |
34dc7c2f | 3168 | |
1c27024e | 3169 | int nlevels = |
32d41fb7 | 3170 | (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; |
1c27024e | 3171 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; |
34dc7c2f BB |
3172 | |
3173 | ASSERT3U(level * epbs, <, 64); | |
3174 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
32d41fb7 PD |
3175 | /* |
3176 | * This assertion shouldn't trip as long as the max indirect block size | |
3177 | * is less than 1M. The reason for this is that up to that point, | |
3178 | * the number of levels required to address an entire object with blocks | |
3179 | * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In | |
3180 | * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 | |
3181 | * (i.e. we can address the entire object), objects will all use at most | |
3182 | * N-1 levels and the assertion won't overflow. However, once epbs is | |
3183 | * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be | |
3184 | * enough to address an entire object, so objects will have 5 levels, | |
3185 | * but then this assertion will overflow. | |
3186 | * | |
3187 | * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we | |
3188 | * need to redo this logic to handle overflows. | |
3189 | */ | |
3190 | ASSERT(level >= nlevels || | |
3191 | ((nlevels - level - 1) * epbs) + | |
3192 | highbit64(dn->dn_phys->dn_nblkptr) <= 64); | |
34dc7c2f | 3193 | if (level >= nlevels || |
32d41fb7 PD |
3194 | blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << |
3195 | ((nlevels - level - 1) * epbs)) || | |
3196 | (fail_sparse && | |
3197 | blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { | |
34dc7c2f | 3198 | /* the buffer has no parent yet */ |
2e528b49 | 3199 | return (SET_ERROR(ENOENT)); |
34dc7c2f BB |
3200 | } else if (level < nlevels-1) { |
3201 | /* this block is referenced from an indirect block */ | |
fc5bb51f | 3202 | int err; |
64b6c47d TN |
3203 | |
3204 | err = dbuf_hold_impl(dn, level + 1, | |
adb726eb | 3205 | blkid >> epbs, fail_sparse, FALSE, NULL, parentp); |
64b6c47d | 3206 | |
34dc7c2f BB |
3207 | if (err) |
3208 | return (err); | |
3209 | err = dbuf_read(*parentp, NULL, | |
3210 | (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); | |
3211 | if (err) { | |
3212 | dbuf_rele(*parentp, NULL); | |
3213 | *parentp = NULL; | |
3214 | return (err); | |
3215 | } | |
f664f1ee | 3216 | rw_enter(&(*parentp)->db_rwlock, RW_READER); |
34dc7c2f BB |
3217 | *bpp = ((blkptr_t *)(*parentp)->db.db_data) + |
3218 | (blkid & ((1ULL << epbs) - 1)); | |
32d41fb7 PD |
3219 | if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) |
3220 | ASSERT(BP_IS_HOLE(*bpp)); | |
f664f1ee | 3221 | rw_exit(&(*parentp)->db_rwlock); |
34dc7c2f BB |
3222 | return (0); |
3223 | } else { | |
3224 | /* the block is referenced from the dnode */ | |
3225 | ASSERT3U(level, ==, nlevels-1); | |
3226 | ASSERT(dn->dn_phys->dn_nblkptr == 0 || | |
3227 | blkid < dn->dn_phys->dn_nblkptr); | |
3228 | if (dn->dn_dbuf) { | |
3229 | dbuf_add_ref(dn->dn_dbuf, NULL); | |
3230 | *parentp = dn->dn_dbuf; | |
3231 | } | |
3232 | *bpp = &dn->dn_phys->dn_blkptr[blkid]; | |
3233 | return (0); | |
3234 | } | |
3235 | } | |
3236 | ||
3237 | static dmu_buf_impl_t * | |
3238 | dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, | |
3236c0b8 | 3239 | dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash) |
34dc7c2f | 3240 | { |
428870ff | 3241 | objset_t *os = dn->dn_objset; |
34dc7c2f BB |
3242 | dmu_buf_impl_t *db, *odb; |
3243 | ||
3244 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
3245 | ASSERT(dn->dn_type != DMU_OT_NONE); | |
3246 | ||
d3c2ae1c | 3247 | db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); |
34dc7c2f | 3248 | |
cccbed9f MM |
3249 | list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), |
3250 | offsetof(dbuf_dirty_record_t, dr_dbuf_node)); | |
3251 | ||
34dc7c2f BB |
3252 | db->db_objset = os; |
3253 | db->db.db_object = dn->dn_object; | |
3254 | db->db_level = level; | |
3255 | db->db_blkid = blkid; | |
34dc7c2f | 3256 | db->db_dirtycnt = 0; |
572e2857 | 3257 | db->db_dnode_handle = dn->dn_handle; |
34dc7c2f BB |
3258 | db->db_parent = parent; |
3259 | db->db_blkptr = blkptr; | |
3236c0b8 | 3260 | db->db_hash = hash; |
34dc7c2f | 3261 | |
0c66c32d | 3262 | db->db_user = NULL; |
bc4501f7 JG |
3263 | db->db_user_immediate_evict = FALSE; |
3264 | db->db_freed_in_flight = FALSE; | |
3265 | db->db_pending_evict = FALSE; | |
34dc7c2f | 3266 | |
428870ff | 3267 | if (blkid == DMU_BONUS_BLKID) { |
34dc7c2f | 3268 | ASSERT3P(parent, ==, dn->dn_dbuf); |
50c957f7 | 3269 | db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - |
34dc7c2f BB |
3270 | (dn->dn_nblkptr-1) * sizeof (blkptr_t); |
3271 | ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); | |
428870ff | 3272 | db->db.db_offset = DMU_BONUS_BLKID; |
34dc7c2f | 3273 | db->db_state = DB_UNCACHED; |
8b3547a4 | 3274 | DTRACE_SET_STATE(db, "bonus buffer created"); |
2e5dc449 | 3275 | db->db_caching_status = DB_NO_CACHE; |
34dc7c2f | 3276 | /* the bonus dbuf is not placed in the hash table */ |
25458cbe | 3277 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f | 3278 | return (db); |
428870ff BB |
3279 | } else if (blkid == DMU_SPILL_BLKID) { |
3280 | db->db.db_size = (blkptr != NULL) ? | |
3281 | BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; | |
3282 | db->db.db_offset = 0; | |
34dc7c2f BB |
3283 | } else { |
3284 | int blocksize = | |
e8b96c60 | 3285 | db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; |
34dc7c2f BB |
3286 | db->db.db_size = blocksize; |
3287 | db->db.db_offset = db->db_blkid * blocksize; | |
3288 | } | |
3289 | ||
3290 | /* | |
3291 | * Hold the dn_dbufs_mtx while we get the new dbuf | |
3292 | * in the hash table *and* added to the dbufs list. | |
3293 | * This prevents a possible deadlock with someone | |
e1cfd73f | 3294 | * trying to look up this dbuf before it's added to the |
34dc7c2f BB |
3295 | * dn_dbufs list. |
3296 | */ | |
3297 | mutex_enter(&dn->dn_dbufs_mtx); | |
8b3547a4 | 3298 | db->db_state = DB_EVICTING; /* not worth logging this state change */ |
34dc7c2f BB |
3299 | if ((odb = dbuf_hash_insert(db)) != NULL) { |
3300 | /* someone else inserted it first */ | |
34dc7c2f | 3301 | mutex_exit(&dn->dn_dbufs_mtx); |
490c845e | 3302 | kmem_cache_free(dbuf_kmem_cache, db); |
5e021f56 | 3303 | DBUF_STAT_BUMP(hash_insert_race); |
34dc7c2f BB |
3304 | return (odb); |
3305 | } | |
8951cb8d | 3306 | avl_add(&dn->dn_dbufs, db); |
9c9531cb | 3307 | |
34dc7c2f | 3308 | db->db_state = DB_UNCACHED; |
8b3547a4 | 3309 | DTRACE_SET_STATE(db, "regular buffer created"); |
2e5dc449 | 3310 | db->db_caching_status = DB_NO_CACHE; |
34dc7c2f | 3311 | mutex_exit(&dn->dn_dbufs_mtx); |
25458cbe | 3312 | arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); |
34dc7c2f BB |
3313 | |
3314 | if (parent && parent != dn->dn_dbuf) | |
3315 | dbuf_add_ref(parent, db); | |
3316 | ||
3317 | ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || | |
424fd7c3 | 3318 | zfs_refcount_count(&dn->dn_holds) > 0); |
c13060e4 | 3319 | (void) zfs_refcount_add(&dn->dn_holds, db); |
34dc7c2f BB |
3320 | |
3321 | dprintf_dbuf(db, "db=%p\n", db); | |
3322 | ||
3323 | return (db); | |
3324 | } | |
3325 | ||
30af21b0 PD |
3326 | /* |
3327 | * This function returns a block pointer and information about the object, | |
3328 | * given a dnode and a block. This is a publicly accessible version of | |
3329 | * dbuf_findbp that only returns some information, rather than the | |
3330 | * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock | |
3331 | * should be locked as (at least) a reader. | |
3332 | */ | |
3333 | int | |
3334 | dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, | |
3335 | blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) | |
3336 | { | |
3337 | dmu_buf_impl_t *dbp = NULL; | |
3338 | blkptr_t *bp2; | |
3339 | int err = 0; | |
3340 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
3341 | ||
3342 | err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); | |
3343 | if (err == 0) { | |
f9e10922 | 3344 | ASSERT3P(bp2, !=, NULL); |
30af21b0 PD |
3345 | *bp = *bp2; |
3346 | if (dbp != NULL) | |
3347 | dbuf_rele(dbp, NULL); | |
3348 | if (datablkszsec != NULL) | |
3349 | *datablkszsec = dn->dn_phys->dn_datablkszsec; | |
3350 | if (indblkshift != NULL) | |
3351 | *indblkshift = dn->dn_phys->dn_indblkshift; | |
3352 | } | |
3353 | ||
3354 | return (err); | |
3355 | } | |
3356 | ||
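[Editor's note: a minimal usage sketch, not part of dbuf.c. The example_ name is invented; the dnode is assumed to be held already. It only illustrates the locking discipline stated above: dn_struct_rwlock taken at least as a reader around the lookup, with the optional size outputs passed as NULL.]

/*
 * Illustrative only: look up the level-0 block pointer for one block of
 * a held dnode.
 */
static int
example_lookup_bp(dnode_t *dn, uint64_t blkid, blkptr_t *bp)
{
	int err;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	err = dbuf_dnode_findbp(dn, 0, blkid, bp, NULL, NULL);
	rw_exit(&dn->dn_struct_rwlock);
	return (err);
}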
fcff0f35 PD |
3357 | typedef struct dbuf_prefetch_arg { |
3358 | spa_t *dpa_spa; /* The spa to issue the prefetch in. */ | |
3359 | zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ | |
3360 | int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ | |
3361 | int dpa_curlevel; /* The current level that we're reading */ | |
d3c2ae1c | 3362 | dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ |
fcff0f35 PD |
3363 | zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ |
3364 | zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ | |
3365 | arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ | |
af20b970 MM |
3366 | dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ |
3367 | void *dpa_arg; /* prefetch completion arg */ | |
fcff0f35 PD |
3368 | } dbuf_prefetch_arg_t; |
3369 | ||
af20b970 MM |
3370 | static void |
3371 | dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) | |
3372 | { | |
6aa8c21a AM |
3373 | if (dpa->dpa_cb != NULL) { |
3374 | dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level, | |
3375 | dpa->dpa_zb.zb_blkid, io_done); | |
3376 | } | |
af20b970 MM |
3377 | kmem_free(dpa, sizeof (*dpa)); |
3378 | } | |
3379 | ||
3380 | static void | |
3381 | dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, | |
3382 | const blkptr_t *iobp, arc_buf_t *abuf, void *private) | |
3383 | { | |
14e4e3cb | 3384 | (void) zio, (void) zb, (void) iobp; |
af20b970 MM |
3385 | dbuf_prefetch_arg_t *dpa = private; |
3386 | ||
af20b970 MM |
3387 | if (abuf != NULL) |
3388 | arc_buf_destroy(abuf, private); | |
9619bcde BB |
3389 | |
3390 | dbuf_prefetch_fini(dpa, B_TRUE); | |
af20b970 MM |
3391 | } |
3392 | ||
fcff0f35 PD |
3393 | /* |
3394 | * Actually issue the prefetch read for the block given. | |
3395 | */ | |
3396 | static void | |
3397 | dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) | |
3398 | { | |
30af21b0 PD |
3399 | ASSERT(!BP_IS_REDACTED(bp) || |
3400 | dsl_dataset_feature_is_active( | |
3401 | dpa->dpa_dnode->dn_objset->os_dsl_dataset, | |
3402 | SPA_FEATURE_REDACTED_DATASETS)); | |
3403 | ||
3404 | if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) | |
af20b970 | 3405 | return (dbuf_prefetch_fini(dpa, B_FALSE)); |
fcff0f35 | 3406 | |
4515b1d0 | 3407 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; |
1c27024e | 3408 | arc_flags_t aflags = |
1e4732cb MM |
3409 | dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | |
3410 | ARC_FLAG_NO_BUF; | |
fcff0f35 | 3411 | |
4515b1d0 TC |
3412 | /* dnodes are always read as raw and then converted later */ |
3413 | if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && | |
3414 | dpa->dpa_curlevel == 0) | |
3415 | zio_flags |= ZIO_FLAG_RAW; | |
3416 | ||
fcff0f35 PD |
3417 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); |
3418 | ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); | |
3419 | ASSERT(dpa->dpa_zio != NULL); | |
af20b970 MM |
3420 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, |
3421 | dbuf_issue_final_prefetch_done, dpa, | |
4515b1d0 | 3422 | dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); |
fcff0f35 PD |
3423 | } |
3424 | ||
3425 | /* | |
3426 | * Called when an indirect block above our prefetch target is read in. This | |
3427 | * will either read in the next indirect block down the tree or issue the actual | |
3428 | * prefetch if the next block down is our target. | |
3429 | */ | |
3430 | static void | |
d4a72f23 TC |
3431 | dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, |
3432 | const blkptr_t *iobp, arc_buf_t *abuf, void *private) | |
fcff0f35 | 3433 | { |
14e4e3cb | 3434 | (void) zb, (void) iobp; |
fcff0f35 | 3435 | dbuf_prefetch_arg_t *dpa = private; |
fcff0f35 PD |
3436 | |
3437 | ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); | |
3438 | ASSERT3S(dpa->dpa_curlevel, >, 0); | |
d3c2ae1c | 3439 | |
c3bd3fb4 TC |
3440 | if (abuf == NULL) { |
3441 | ASSERT(zio == NULL || zio->io_error != 0); | |
d954ca19 RY |
3442 | dbuf_prefetch_fini(dpa, B_TRUE); |
3443 | return; | |
c3bd3fb4 TC |
3444 | } |
3445 | ASSERT(zio == NULL || zio->io_error == 0); | |
3446 | ||
d3c2ae1c GW |
3447 | /* |
3448 | * The dpa_dnode is only valid if we are called with a NULL | |
3449 | * zio. This indicates that the arc_read() returned without | |
3450 | * first calling zio_read() to issue a physical read. Once | |
3451 | * a physical read is made the dpa_dnode must be invalidated | |
3452 | * as the locks guarding it may have been dropped. If the | |
3453 | * dpa_dnode is still valid, then we want to add it to the dbuf | |
3454 | * cache. To do so, we must hold the dbuf associated with the block | |
3455 | * we just prefetched, read its contents so that we associate it | |
3456 | * with an arc_buf_t, and then release it. | |
3457 | */ | |
fcff0f35 PD |
3458 | if (zio != NULL) { |
3459 | ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); | |
b5256303 | 3460 | if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { |
d3c2ae1c GW |
3461 | ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); |
3462 | } else { | |
3463 | ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); | |
3464 | } | |
fcff0f35 | 3465 | ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); |
d3c2ae1c GW |
3466 | |
3467 | dpa->dpa_dnode = NULL; | |
3468 | } else if (dpa->dpa_dnode != NULL) { | |
3469 | uint64_t curblkid = dpa->dpa_zb.zb_blkid >> | |
3470 | (dpa->dpa_epbs * (dpa->dpa_curlevel - | |
3471 | dpa->dpa_zb.zb_level)); | |
3472 | dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, | |
3473 | dpa->dpa_curlevel, curblkid, FTAG); | |
305781da | 3474 | if (db == NULL) { |
305781da | 3475 | arc_buf_destroy(abuf, private); |
d954ca19 RY |
3476 | dbuf_prefetch_fini(dpa, B_TRUE); |
3477 | return; | |
305781da | 3478 | } |
d3c2ae1c GW |
3479 | (void) dbuf_read(db, NULL, |
3480 | DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); | |
3481 | dbuf_rele(db, FTAG); | |
fcff0f35 PD |
3482 | } |
3483 | ||
d4a72f23 | 3484 | dpa->dpa_curlevel--; |
1c27024e | 3485 | uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> |
fcff0f35 | 3486 | (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); |
1c27024e | 3487 | blkptr_t *bp = ((blkptr_t *)abuf->b_data) + |
fcff0f35 | 3488 | P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); |
d4a72f23 | 3489 | |
97fac0fb | 3490 | ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode && |
30af21b0 PD |
3491 | dsl_dataset_feature_is_active( |
3492 | dpa->dpa_dnode->dn_objset->os_dsl_dataset, | |
97fac0fb | 3493 | SPA_FEATURE_REDACTED_DATASETS))); |
30af21b0 | 3494 | if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { |
d954ca19 | 3495 | arc_buf_destroy(abuf, private); |
af20b970 | 3496 | dbuf_prefetch_fini(dpa, B_TRUE); |
d954ca19 | 3497 | return; |
fcff0f35 PD |
3498 | } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { |
3499 | ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); | |
3500 | dbuf_issue_final_prefetch(dpa, bp); | |
fcff0f35 PD |
3501 | } else { |
3502 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
3503 | zbookmark_phys_t zb; | |
3504 | ||
7c351e31 | 3505 | /* flag if L2ARC eligible, l2arc_noprefetch then decides */ |
3506 | if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) | |
3507 | iter_aflags |= ARC_FLAG_L2CACHE; | |
3508 | ||
fcff0f35 PD |
3509 | ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); |
3510 | ||
3511 | SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, | |
3512 | dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); | |
3513 | ||
3514 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
6aa8c21a AM |
3515 | bp, dbuf_prefetch_indirect_done, dpa, |
3516 | ZIO_PRIORITY_SYNC_READ, | |
fcff0f35 PD |
3517 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, |
3518 | &iter_aflags, &zb); | |
3519 | } | |
d3c2ae1c GW |
3520 | |
3521 | arc_buf_destroy(abuf, private); | |
fcff0f35 PD |
3522 | } |
3523 | ||
3524 | /* | |
3525 | * Issue prefetch reads for the given block on the given level. If the indirect | |
3526 | * blocks above that block are not in memory, we will read them in | |
3527 | * asynchronously. As a result, this call never blocks waiting for a read to | |
b5256303 TC |
3528 | * complete. Note that the prefetch might fail if the dataset is encrypted and |
3529 | * the encryption key is unmapped before the IO completes. | |
fcff0f35 | 3530 | */ |
af20b970 MM |
3531 | int |
3532 | dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, | |
3533 | zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, | |
3534 | void *arg) | |
34dc7c2f | 3535 | { |
fcff0f35 PD |
3536 | blkptr_t bp; |
3537 | int epbs, nlevels, curlevel; | |
3538 | uint64_t curblkid; | |
34dc7c2f | 3539 | |
428870ff | 3540 | ASSERT(blkid != DMU_BONUS_BLKID); |
34dc7c2f BB |
3541 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
3542 | ||
7f60329a | 3543 | if (blkid > dn->dn_maxblkid) |
af20b970 | 3544 | goto no_issue; |
7f60329a | 3545 | |
f664f1ee | 3546 | if (level == 0 && dnode_block_freed(dn, blkid)) |
af20b970 | 3547 | goto no_issue; |
34dc7c2f | 3548 | |
fcff0f35 PD |
3549 | /* |
3550 | * This dnode hasn't been written to disk yet, so there's nothing to | |
3551 | * prefetch. | |
3552 | */ | |
3553 | nlevels = dn->dn_phys->dn_nlevels; | |
3554 | if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) | |
af20b970 | 3555 | goto no_issue; |
fcff0f35 PD |
3556 | |
3557 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
3558 | if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) | |
af20b970 | 3559 | goto no_issue; |
fcff0f35 | 3560 | |
1c27024e | 3561 | dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, |
3236c0b8 | 3562 | level, blkid, NULL); |
fcff0f35 PD |
3563 | if (db != NULL) { |
3564 | mutex_exit(&db->db_mtx); | |
572e2857 | 3565 | /* |
fcff0f35 PD |
3566 | * This dbuf already exists. It is either CACHED, or |
3567 | * (we assume) about to be read or filled. | |
572e2857 | 3568 | */ |
af20b970 | 3569 | goto no_issue; |
34dc7c2f BB |
3570 | } |
3571 | ||
fcff0f35 PD |
3572 | /* |
3573 | * Find the closest ancestor (indirect block) of the target block | |
3574 | * that is present in the cache. In this indirect block, we will | |
3575 | * find the bp that is at curlevel, curblkid. | |
3576 | */ | |
3577 | curlevel = level; | |
3578 | curblkid = blkid; | |
3579 | while (curlevel < nlevels - 1) { | |
3580 | int parent_level = curlevel + 1; | |
3581 | uint64_t parent_blkid = curblkid >> epbs; | |
3582 | dmu_buf_impl_t *db; | |
3583 | ||
3584 | if (dbuf_hold_impl(dn, parent_level, parent_blkid, | |
3585 | FALSE, TRUE, FTAG, &db) == 0) { | |
3586 | blkptr_t *bpp = db->db_buf->b_data; | |
3587 | bp = bpp[P2PHASE(curblkid, 1 << epbs)]; | |
3588 | dbuf_rele(db, FTAG); | |
3589 | break; | |
3590 | } | |
428870ff | 3591 | |
fcff0f35 PD |
3592 | curlevel = parent_level; |
3593 | curblkid = parent_blkid; | |
3594 | } | |
34dc7c2f | 3595 | |
fcff0f35 PD |
3596 | if (curlevel == nlevels - 1) { |
3597 | /* No cached indirect blocks found. */ | |
3598 | ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); | |
3599 | bp = dn->dn_phys->dn_blkptr[curblkid]; | |
34dc7c2f | 3600 | } |
30af21b0 PD |
3601 | ASSERT(!BP_IS_REDACTED(&bp) || |
3602 | dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, | |
3603 | SPA_FEATURE_REDACTED_DATASETS)); | |
3604 | if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) | |
af20b970 | 3605 | goto no_issue; |
fcff0f35 PD |
3606 | |
3607 | ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); | |
3608 | ||
1c27024e | 3609 | zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, |
fcff0f35 PD |
3610 | ZIO_FLAG_CANFAIL); |
3611 | ||
1c27024e DB |
3612 | dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); |
3613 | dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; | |
fcff0f35 PD |
3614 | SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, |
3615 | dn->dn_object, level, blkid); | |
3616 | dpa->dpa_curlevel = curlevel; | |
3617 | dpa->dpa_prio = prio; | |
3618 | dpa->dpa_aflags = aflags; | |
3619 | dpa->dpa_spa = dn->dn_objset->os_spa; | |
d3c2ae1c | 3620 | dpa->dpa_dnode = dn; |
fcff0f35 PD |
3621 | dpa->dpa_epbs = epbs; |
3622 | dpa->dpa_zio = pio; | |
af20b970 MM |
3623 | dpa->dpa_cb = cb; |
3624 | dpa->dpa_arg = arg; | |
fcff0f35 | 3625 | |
ed2f7ba0 AM |
3626 | if (!DNODE_LEVEL_IS_CACHEABLE(dn, level)) |
3627 | dpa->dpa_aflags |= ARC_FLAG_UNCACHED; | |
3628 | else if (dnode_level_is_l2cacheable(&bp, dn, level)) | |
7c351e31 | 3629 | dpa->dpa_aflags |= ARC_FLAG_L2CACHE; |
3630 | ||
fcff0f35 PD |
3631 | /* |
3632 | * If we have the indirect just above us, no need to do the asynchronous | |
3633 | * prefetch chain; we'll just run the last step ourselves. If we're at | |
3634 | * a higher level, though, we want to issue the prefetches for all the | |
3635 | * indirect blocks asynchronously, so we can go on with whatever we were | |
3636 | * doing. | |
3637 | */ | |
3638 | if (curlevel == level) { | |
3639 | ASSERT3U(curblkid, ==, blkid); | |
3640 | dbuf_issue_final_prefetch(dpa, &bp); | |
fcff0f35 PD |
3641 | } else { |
3642 | arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; | |
3643 | zbookmark_phys_t zb; | |
3644 | ||
7c351e31 | 3645 | /* flag if L2ARC eligible, l2arc_noprefetch then decides */ |
c9d62d13 | 3646 | if (dnode_level_is_l2cacheable(&bp, dn, level)) |
7c351e31 | 3647 | iter_aflags |= ARC_FLAG_L2CACHE; |
3648 | ||
fcff0f35 PD |
3649 | SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, |
3650 | dn->dn_object, curlevel, curblkid); | |
3651 | (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, | |
6aa8c21a AM |
3652 | &bp, dbuf_prefetch_indirect_done, dpa, |
3653 | ZIO_PRIORITY_SYNC_READ, | |
fcff0f35 PD |
3654 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, |
3655 | &iter_aflags, &zb); | |
3656 | } | |
3657 | /* | |
3658 | * We use pio here instead of dpa_zio since it's possible that | |
3659 | * dpa may have already been freed. | |
3660 | */ | |
3661 | zio_nowait(pio); | |
af20b970 MM |
3662 | return (1); |
3663 | no_issue: | |
3664 | if (cb != NULL) | |
6aa8c21a | 3665 | cb(arg, level, blkid, B_FALSE); |
af20b970 MM |
3666 | return (0); |
3667 | } | |
3668 | ||
3669 | int | |
3670 | dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, | |
3671 | arc_flags_t aflags) | |
3672 | { | |
3673 | ||
3674 | return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL)); | |
34dc7c2f BB |
3675 | } |
3676 | ||
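[Editor's note: a hedged sketch, not part of dbuf.c, of issuing a level-0 prefetch through the wrapper above. The example_ name, the priority, and the ARC flag are illustrative choices; the rwlock requirement comes from the assertion in dbuf_prefetch_impl().]

/*
 * Illustrative only: kick off an asynchronous prefetch of one data block
 * of a held dnode.
 */
static void
example_prefetch_block(dnode_t *dn, uint64_t blkid)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	(void) dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
	    ARC_FLAG_PREDICTIVE_PREFETCH);
	rw_exit(&dn->dn_struct_rwlock);
}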
71a24c3c | 3677 | /* |
64b6c47d | 3678 | * Helper function for dbuf_hold_impl() to copy a buffer. Handles |
71a24c3c TC |
3679 | * the case of encrypted, compressed and uncompressed buffers by |
3680 | * allocating the new buffer, respectively, with arc_alloc_raw_buf(), | |
3681 | * arc_alloc_compressed_buf() or arc_alloc_buf(). |
3682 | * | |
64b6c47d | 3683 | * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). |
71a24c3c TC |
3684 | */ |
3685 | noinline static void | |
64b6c47d | 3686 | dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db) |
71a24c3c | 3687 | { |
64b6c47d | 3688 | dbuf_dirty_record_t *dr = db->db_data_pending; |
a81b8124 AR |
3689 | arc_buf_t *data = dr->dt.dl.dr_data; |
3690 | enum zio_compress compress_type = arc_get_compression(data); | |
3691 | uint8_t complevel = arc_get_complevel(data); | |
3692 | ||
3693 | if (arc_is_encrypted(data)) { | |
3694 | boolean_t byteorder; | |
3695 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
3696 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
3697 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
3698 | ||
3699 | arc_get_raw_params(data, &byteorder, salt, iv, mac); | |
3700 | dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, | |
3701 | dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, | |
3702 | dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), | |
3703 | compress_type, complevel)); | |
3704 | } else if (compress_type != ZIO_COMPRESS_OFF) { | |
3705 | dbuf_set_data(db, arc_alloc_compressed_buf( | |
3706 | dn->dn_objset->os_spa, db, arc_buf_size(data), | |
3707 | arc_buf_lsize(data), compress_type, complevel)); | |
3708 | } else { | |
3709 | dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, | |
3710 | DBUF_GET_BUFC_TYPE(db), db->db.db_size)); | |
3711 | } | |
71a24c3c | 3712 | |
f664f1ee | 3713 | rw_enter(&db->db_rwlock, RW_WRITER); |
861166b0 | 3714 | memcpy(db->db.db_data, data->b_data, arc_buf_size(data)); |
f664f1ee | 3715 | rw_exit(&db->db_rwlock); |
71a24c3c TC |
3716 | } |
3717 | ||
34dc7c2f BB |
3718 | /* |
3719 | * Returns with db_holds incremented, and db_mtx not held. | |
3720 | * Note: dn_struct_rwlock must be held. | |
3721 | */ | |
64b6c47d TN |
3722 | int |
3723 | dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, | |
3724 | boolean_t fail_sparse, boolean_t fail_uncached, | |
a926aab9 | 3725 | const void *tag, dmu_buf_impl_t **dbp) |
34dc7c2f | 3726 | { |
64b6c47d | 3727 | dmu_buf_impl_t *db, *parent = NULL; |
3236c0b8 | 3728 | uint64_t hv; |
d3c2ae1c | 3729 | |
37f03da8 | 3730 | /* If the pool has been created, verify the tx_sync_lock is not held */ |
64b6c47d | 3731 | spa_t *spa = dn->dn_objset->os_spa; |
37f03da8 SH |
3732 | dsl_pool_t *dp = spa->spa_dsl_pool; |
3733 | if (dp != NULL) { | |
3734 | ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock)); | |
3735 | } | |
3736 | ||
64b6c47d TN |
3737 | ASSERT(blkid != DMU_BONUS_BLKID); |
3738 | ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); | |
3739 | ASSERT3U(dn->dn_nlevels, >, level); | |
3740 | ||
3741 | *dbp = NULL; | |
3742 | ||
34dc7c2f | 3743 | /* dbuf_find() returns with db_mtx held */ |
3236c0b8 | 3744 | db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv); |
fc5bb51f | 3745 | |
64b6c47d TN |
3746 | if (db == NULL) { |
3747 | blkptr_t *bp = NULL; | |
3748 | int err; | |
fc5bb51f | 3749 | |
64b6c47d | 3750 | if (fail_uncached) |
fcff0f35 PD |
3751 | return (SET_ERROR(ENOENT)); |
3752 | ||
64b6c47d TN |
3753 | ASSERT3P(parent, ==, NULL); |
3754 | err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); | |
3755 | if (fail_sparse) { | |
3756 | if (err == 0 && bp && BP_IS_HOLE(bp)) | |
3757 | err = SET_ERROR(ENOENT); | |
3758 | if (err) { | |
3759 | if (parent) | |
3760 | dbuf_rele(parent, NULL); | |
3761 | return (err); | |
34dc7c2f BB |
3762 | } |
3763 | } | |
64b6c47d TN |
3764 | if (err && err != ENOENT) |
3765 | return (err); | |
3236c0b8 | 3766 | db = dbuf_create(dn, level, blkid, parent, bp, hv); |
34dc7c2f BB |
3767 | } |
3768 | ||
64b6c47d TN |
3769 | if (fail_uncached && db->db_state != DB_CACHED) { |
3770 | mutex_exit(&db->db_mtx); | |
fcff0f35 PD |
3771 | return (SET_ERROR(ENOENT)); |
3772 | } | |
3773 | ||
64b6c47d TN |
3774 | if (db->db_buf != NULL) { |
3775 | arc_buf_access(db->db_buf); | |
3776 | ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); | |
0873bb63 | 3777 | } |
34dc7c2f | 3778 | |
64b6c47d | 3779 | ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); |
34dc7c2f BB |
3780 | |
3781 | /* | |
e1cfd73f | 3782 | * If this buffer is currently syncing out, and we are |
34dc7c2f BB |
3783 | * still referencing it from db_data, we need to make a copy |
3784 | * of it in case we decide we want to dirty it again in this txg. | |
3785 | */ | |
64b6c47d TN |
3786 | if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && |
3787 | dn->dn_object != DMU_META_DNODE_OBJECT && | |
3788 | db->db_state == DB_CACHED && db->db_data_pending) { | |
3789 | dbuf_dirty_record_t *dr = db->db_data_pending; | |
5cc49509 RY |
3790 | if (dr->dt.dl.dr_data == db->db_buf) { |
3791 | ASSERT3P(db->db_buf, !=, NULL); | |
64b6c47d | 3792 | dbuf_hold_copy(dn, db); |
5cc49509 | 3793 | } |
64b6c47d TN |
3794 | } |
3795 | ||
3796 | if (multilist_link_active(&db->db_cache_link)) { | |
3797 | ASSERT(zfs_refcount_is_zero(&db->db_holds)); | |
3798 | ASSERT(db->db_caching_status == DB_DBUF_CACHE || | |
3799 | db->db_caching_status == DB_DBUF_METADATA_CACHE); | |
3800 | ||
ffdf019c | 3801 | multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); |
92dc4ad8 RN |
3802 | |
3803 | uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db); | |
424fd7c3 | 3804 | (void) zfs_refcount_remove_many( |
92dc4ad8 | 3805 | &dbuf_caches[db->db_caching_status].size, size, db); |
2e5dc449 | 3806 | |
64b6c47d | 3807 | if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { |
2e5dc449 MA |
3808 | DBUF_STAT_BUMPDOWN(metadata_cache_count); |
3809 | } else { | |
64b6c47d | 3810 | DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); |
2e5dc449 | 3811 | DBUF_STAT_BUMPDOWN(cache_count); |
92dc4ad8 | 3812 | DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size); |
2e5dc449 | 3813 | } |
64b6c47d | 3814 | db->db_caching_status = DB_NO_CACHE; |
d3c2ae1c | 3815 | } |
64b6c47d TN |
3816 | (void) zfs_refcount_add(&db->db_holds, tag); |
3817 | DBUF_VERIFY(db); | |
3818 | mutex_exit(&db->db_mtx); | |
34dc7c2f BB |
3819 | |
3820 | /* NOTE: we can't rele the parent until after we drop the db_mtx */ | |
64b6c47d TN |
3821 | if (parent) |
3822 | dbuf_rele(parent, NULL); | |
34dc7c2f | 3823 | |
64b6c47d TN |
3824 | ASSERT3P(DB_DNODE(db), ==, dn); |
3825 | ASSERT3U(db->db_blkid, ==, blkid); | |
3826 | ASSERT3U(db->db_level, ==, level); | |
3827 | *dbp = db; | |
34dc7c2f BB |
3828 | |
3829 | return (0); | |
3830 | } | |
3831 | ||
3832 | dmu_buf_impl_t * | |
a926aab9 | 3833 | dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag) |
34dc7c2f | 3834 | { |
fcff0f35 | 3835 | return (dbuf_hold_level(dn, 0, blkid, tag)); |
34dc7c2f BB |
3836 | } |
3837 | ||
3838 | dmu_buf_impl_t * | |
a926aab9 | 3839 | dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag) |
34dc7c2f BB |
3840 | { |
3841 | dmu_buf_impl_t *db; | |
fcff0f35 | 3842 | int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); |
34dc7c2f BB |
3843 | return (err ? NULL : db); |
3844 | } | |
3845 | ||
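[Editor's note: a minimal sketch, not part of dbuf.c, of the hold/read/release pattern built from the routines above; the example_ name and the DB_RF_CANFAIL flag choice are illustrative. dn_struct_rwlock is held only across dbuf_hold(), and the hold is dropped with dbuf_rele() once the data has been consumed.]

/*
 * Illustrative only: read one level-0 block of a held dnode through the
 * dbuf layer.  Error handling is reduced to the essentials.
 */
static int
example_read_block(dnode_t *dn, uint64_t blkid, const void *tag)
{
	dmu_buf_impl_t *db;
	int err;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));

	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	/* ... consume db->db.db_data while the hold is still kept ... */
	dbuf_rele(db, tag);
	return (err);
}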
3846 | void | |
3847 | dbuf_create_bonus(dnode_t *dn) | |
3848 | { | |
3849 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
3850 | ||
3851 | ASSERT(dn->dn_bonus == NULL); | |
3236c0b8 RY |
3852 | dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL, |
3853 | dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID)); | |
428870ff BB |
3854 | } |
3855 | ||
3856 | int | |
3857 | dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) | |
3858 | { | |
3859 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
572e2857 | 3860 | |
428870ff | 3861 | if (db->db_blkid != DMU_SPILL_BLKID) |
2e528b49 | 3862 | return (SET_ERROR(ENOTSUP)); |
428870ff BB |
3863 | if (blksz == 0) |
3864 | blksz = SPA_MINBLOCKSIZE; | |
f1512ee6 MA |
3865 | ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); |
3866 | blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); | |
428870ff | 3867 | |
428870ff | 3868 | dbuf_new_size(db, blksz, tx); |
428870ff BB |
3869 | |
3870 | return (0); | |
3871 | } | |
3872 | ||
3873 | void | |
3874 | dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) | |
3875 | { | |
3876 | dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); | |
34dc7c2f BB |
3877 | } |
3878 | ||
3879 | #pragma weak dmu_buf_add_ref = dbuf_add_ref | |
3880 | void | |
a926aab9 | 3881 | dbuf_add_ref(dmu_buf_impl_t *db, const void *tag) |
34dc7c2f | 3882 | { |
c13060e4 | 3883 | int64_t holds = zfs_refcount_add(&db->db_holds, tag); |
d3c2ae1c | 3884 | VERIFY3S(holds, >, 1); |
34dc7c2f BB |
3885 | } |
3886 | ||
6ebebace JG |
3887 | #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref |
3888 | boolean_t | |
3889 | dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, | |
a926aab9 | 3890 | const void *tag) |
6ebebace JG |
3891 | { |
3892 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
3893 | dmu_buf_impl_t *found_db; | |
3894 | boolean_t result = B_FALSE; | |
3895 | ||
d617648c | 3896 | if (blkid == DMU_BONUS_BLKID) |
6ebebace JG |
3897 | found_db = dbuf_find_bonus(os, obj); |
3898 | else | |
3236c0b8 | 3899 | found_db = dbuf_find(os, obj, 0, blkid, NULL); |
6ebebace JG |
3900 | |
3901 | if (found_db != NULL) { | |
3902 | if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { | |
c13060e4 | 3903 | (void) zfs_refcount_add(&db->db_holds, tag); |
6ebebace JG |
3904 | result = B_TRUE; |
3905 | } | |
d617648c | 3906 | mutex_exit(&found_db->db_mtx); |
6ebebace JG |
3907 | } |
3908 | return (result); | |
3909 | } | |
3910 | ||
572e2857 BB |
3911 | /* |
3912 | * If you call dbuf_rele() you had better not be referencing the dnode handle | |
3913 | * unless you have some other direct or indirect hold on the dnode. (An indirect | |
3914 | * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) | |
3915 | * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the | |
3916 | * dnode's parent dbuf evicting its dnode handles. | |
3917 | */ | |
34dc7c2f | 3918 | void |
a926aab9 | 3919 | dbuf_rele(dmu_buf_impl_t *db, const void *tag) |
428870ff BB |
3920 | { |
3921 | mutex_enter(&db->db_mtx); | |
3d503a76 | 3922 | dbuf_rele_and_unlock(db, tag, B_FALSE); |
428870ff BB |
3923 | } |
3924 | ||
b0bc7a84 | 3925 | void |
a926aab9 | 3926 | dmu_buf_rele(dmu_buf_t *db, const void *tag) |
b0bc7a84 MG |
3927 | { |
3928 | dbuf_rele((dmu_buf_impl_t *)db, tag); | |
3929 | } | |
3930 | ||
428870ff BB |
3931 | /* |
3932 | * dbuf_rele() for an already-locked dbuf. This is necessary to allow | |
1fac63e5 MA |
3933 | * db_dirtycnt and db_holds to be updated atomically. The 'evicting' |
3934 | * argument should be set if we are already in the dbuf-evicting code | |
3935 | * path, in which case we don't want to recursively evict. This allows us to | |
3936 | * avoid deeply nested stacks that would have a call flow similar to this: | |
3937 | * | |
3938 | * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() | |
3939 | * ^ | | |
3940 | * | | | |
3941 | * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ | |
3942 | * | |
428870ff BB |
3943 | */ |
3944 | void | |
a926aab9 | 3945 | dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting) |
34dc7c2f BB |
3946 | { |
3947 | int64_t holds; | |
cbd8f5b7 | 3948 | uint64_t size; |
34dc7c2f | 3949 | |
428870ff | 3950 | ASSERT(MUTEX_HELD(&db->db_mtx)); |
34dc7c2f BB |
3951 | DBUF_VERIFY(db); |
3952 | ||
572e2857 BB |
3953 | /* |
3954 | * Remove the reference to the dbuf before removing its hold on the | |
3955 | * dnode so we can guarantee in dnode_move() that a referenced bonus | |
3956 | * buffer has a corresponding dnode hold. | |
3957 | */ | |
424fd7c3 | 3958 | holds = zfs_refcount_remove(&db->db_holds, tag); |
34dc7c2f BB |
3959 | ASSERT(holds >= 0); |
3960 | ||
3961 | /* | |
3962 | * We can't freeze indirects if there is a possibility that they | |
3963 | * may be modified in the current syncing context. | |
3964 | */ | |
d3c2ae1c GW |
3965 | if (db->db_buf != NULL && |
3966 | holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { | |
34dc7c2f | 3967 | arc_buf_freeze(db->db_buf); |
d3c2ae1c | 3968 | } |
34dc7c2f BB |
3969 | |
3970 | if (holds == db->db_dirtycnt && | |
bc4501f7 | 3971 | db->db_level == 0 && db->db_user_immediate_evict) |
34dc7c2f BB |
3972 | dbuf_evict_user(db); |
3973 | ||
3974 | if (holds == 0) { | |
428870ff | 3975 | if (db->db_blkid == DMU_BONUS_BLKID) { |
4c7b7eed | 3976 | dnode_t *dn; |
bc4501f7 | 3977 | boolean_t evict_dbuf = db->db_pending_evict; |
572e2857 BB |
3978 | |
3979 | /* | |
4c7b7eed JG |
3980 | * If the dnode moves here, we cannot cross this |
3981 | * barrier until the move completes. | |
572e2857 BB |
3982 | */ |
3983 | DB_DNODE_ENTER(db); | |
4c7b7eed JG |
3984 | |
3985 | dn = DB_DNODE(db); | |
3986 | atomic_dec_32(&dn->dn_dbufs_count); | |
3987 | ||
3988 | /* | |
3989 | * Decrementing the dbuf count means that the bonus | |
3990 | * buffer's dnode hold is no longer discounted in | |
3991 | * dnode_move(). The dnode cannot move until after | |
bc4501f7 | 3992 | * the dnode_rele() below. |
4c7b7eed | 3993 | */ |
572e2857 | 3994 | DB_DNODE_EXIT(db); |
4c7b7eed JG |
3995 | |
3996 | /* | |
3997 | * Do not reference db after its lock is dropped. | |
3998 | * Another thread may evict it. | |
3999 | */ | |
4000 | mutex_exit(&db->db_mtx); | |
4001 | ||
bc4501f7 | 4002 | if (evict_dbuf) |
4c7b7eed | 4003 | dnode_evict_bonus(dn); |
bc4501f7 JG |
4004 | |
4005 | dnode_rele(dn, db); | |
34dc7c2f BB |
4006 | } else if (db->db_buf == NULL) { |
4007 | /* | |
4008 | * This is a special case: we never associated this | |
4009 | * dbuf with any data allocated from the ARC. | |
4010 | */ | |
b128c09f BB |
4011 | ASSERT(db->db_state == DB_UNCACHED || |
4012 | db->db_state == DB_NOFILL); | |
d3c2ae1c | 4013 | dbuf_destroy(db); |
34dc7c2f | 4014 | } else if (arc_released(db->db_buf)) { |
34dc7c2f BB |
4015 | /* |
4016 | * This dbuf has anonymous data associated with it. | |
4017 | */ | |
d3c2ae1c | 4018 | dbuf_destroy(db); |
ed2f7ba0 AM |
4019 | } else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) || |
4020 | db->db_pending_evict) { | |
4021 | dbuf_destroy(db); | |
4022 | } else if (!multilist_link_active(&db->db_cache_link)) { | |
4023 | ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); | |
4024 | ||
4025 | dbuf_cached_state_t dcs = | |
4026 | dbuf_include_in_metadata_cache(db) ? | |
4027 | DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; | |
4028 | db->db_caching_status = dcs; | |
4029 | ||
4030 | multilist_insert(&dbuf_caches[dcs].cache, db); | |
92dc4ad8 RN |
4031 | uint64_t db_size = db->db.db_size + |
4032 | dmu_buf_user_size(&db->db); | |
ed2f7ba0 AM |
4033 | size = zfs_refcount_add_many( |
4034 | &dbuf_caches[dcs].size, db_size, db); | |
4035 | uint8_t db_level = db->db_level; | |
4036 | mutex_exit(&db->db_mtx); | |
d3c2ae1c | 4037 | |
ed2f7ba0 AM |
4038 | if (dcs == DB_DBUF_METADATA_CACHE) { |
4039 | DBUF_STAT_BUMP(metadata_cache_count); | |
4040 | DBUF_STAT_MAX(metadata_cache_size_bytes_max, | |
4041 | size); | |
4042 | } else { | |
4043 | DBUF_STAT_BUMP(cache_count); | |
4044 | DBUF_STAT_MAX(cache_size_bytes_max, size); | |
4045 | DBUF_STAT_BUMP(cache_levels[db_level]); | |
4046 | DBUF_STAT_INCR(cache_levels_bytes[db_level], | |
4047 | db_size); | |
bd089c54 | 4048 | } |
d3c2ae1c | 4049 | |
ed2f7ba0 AM |
4050 | if (dcs == DB_DBUF_CACHE && !evicting) |
4051 | dbuf_evict_notify(size); | |
34dc7c2f BB |
4052 | } |
4053 | } else { | |
4054 | mutex_exit(&db->db_mtx); | |
4055 | } | |
d3c2ae1c | 4056 | |
34dc7c2f BB |
4057 | } |
4058 | ||
4059 | #pragma weak dmu_buf_refcount = dbuf_refcount | |
4060 | uint64_t | |
4061 | dbuf_refcount(dmu_buf_impl_t *db) | |
4062 | { | |
424fd7c3 | 4063 | return (zfs_refcount_count(&db->db_holds)); |
34dc7c2f BB |
4064 | } |
4065 | ||
cd32e5db TC |
4066 | uint64_t |
4067 | dmu_buf_user_refcount(dmu_buf_t *db_fake) | |
4068 | { | |
4069 | uint64_t holds; | |
4070 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
4071 | ||
4072 | mutex_enter(&db->db_mtx); | |
424fd7c3 TS |
4073 | ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); |
4074 | holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; | |
cd32e5db TC |
4075 | mutex_exit(&db->db_mtx); |
4076 | ||
4077 | return (holds); | |
4078 | } | |
4079 | ||
34dc7c2f | 4080 | void * |
0c66c32d JG |
4081 | dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, |
4082 | dmu_buf_user_t *new_user) | |
34dc7c2f | 4083 | { |
0c66c32d JG |
4084 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
4085 | ||
4086 | mutex_enter(&db->db_mtx); | |
4087 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
4088 | if (db->db_user == old_user) | |
4089 | db->db_user = new_user; | |
4090 | else | |
4091 | old_user = db->db_user; | |
4092 | dbuf_verify_user(db, DBVU_NOT_EVICTING); | |
4093 | mutex_exit(&db->db_mtx); | |
4094 | ||
4095 | return (old_user); | |
34dc7c2f BB |
4096 | } |
4097 | ||
4098 | void * | |
0c66c32d | 4099 | dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f | 4100 | { |
0c66c32d | 4101 | return (dmu_buf_replace_user(db_fake, NULL, user)); |
34dc7c2f BB |
4102 | } |
4103 | ||
4104 | void * | |
0c66c32d | 4105 | dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) |
34dc7c2f BB |
4106 | { |
4107 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 4108 | |
bc4501f7 | 4109 | db->db_user_immediate_evict = TRUE; |
0c66c32d JG |
4110 | return (dmu_buf_set_user(db_fake, user)); |
4111 | } | |
34dc7c2f | 4112 | |
0c66c32d JG |
4113 | void * |
4114 | dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) | |
4115 | { | |
4116 | return (dmu_buf_replace_user(db_fake, user, NULL)); | |
34dc7c2f BB |
4117 | } |
4118 | ||
4119 | void * | |
4120 | dmu_buf_get_user(dmu_buf_t *db_fake) | |
4121 | { | |
4122 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
34dc7c2f | 4123 | |
0c66c32d JG |
4124 | dbuf_verify_user(db, DBVU_NOT_EVICTING); |
4125 | return (db->db_user); | |
4126 | } | |
4127 | ||
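The five functions above make up the dbuf "user" API: a consumer embeds a dmu_buf_user_t in its own per-object state, attaches that state to a held dbuf with dmu_buf_set_user() (or the immediate-evict variant), and is called back when the dbuf is destroyed. A minimal sketch of that pattern follows; the my_obj_cache_t structure, its fields, and the attach helper are hypothetical, and dmu_buf_init_user() is assumed to have its usual four-argument dmu.h signature (sync callback, async callback, dbuf pointer to clear on evict).

	/* Hypothetical per-dbuf consumer state; illustration only. */
	typedef struct my_obj_cache {
		dmu_buf_user_t moc_dbu;	/* must be the first member, see below */
		dmu_buf_t *moc_db;	/* cleared on eviction (debug builds) */
		uint64_t moc_decoded;	/* whatever the consumer caches */
	} my_obj_cache_t;

	/* Async evict callback; receives the embedded dmu_buf_user_t. */
	static void
	my_obj_cache_evict_async(void *dbu)
	{
		my_obj_cache_t *moc = dbu;	/* valid: moc_dbu is first */
		kmem_free(moc, sizeof (*moc));
	}

	/* db must be held by the caller for the lifetime of the user. */
	static my_obj_cache_t *
	my_obj_cache_attach(dmu_buf_t *db)
	{
		my_obj_cache_t *moc = kmem_zalloc(sizeof (*moc), KM_SLEEP);

		moc->moc_db = db;
		dmu_buf_init_user(&moc->moc_dbu, NULL, my_obj_cache_evict_async,
		    &moc->moc_db);

		/*
		 * dmu_buf_set_user() returns the existing user if another
		 * thread attached one first (cast relies on moc_dbu being
		 * the first member).
		 */
		my_obj_cache_t *winner = dmu_buf_set_user(db, &moc->moc_dbu);
		if (winner != NULL) {
			kmem_free(moc, sizeof (*moc));
			return (winner);
		}
		return (moc);
	}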
92dc4ad8 RN |
4128 | uint64_t |
4129 | dmu_buf_user_size(dmu_buf_t *db_fake) | |
4130 | { | |
4131 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
4132 | if (db->db_user == NULL) | |
4133 | return (0); | |
4134 | return (atomic_load_64(&db->db_user->dbu_size)); | |
4135 | } | |
4136 | ||
4137 | void | |
4138 | dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd) | |
4139 | { | |
4140 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
4141 | ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); | |
4142 | ASSERT3P(db->db_user, !=, NULL); | |
4143 | ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd); | |
4144 | atomic_add_64(&db->db_user->dbu_size, nadd); | |
4145 | } | |
4146 | ||
4147 | void | |
4148 | dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub) | |
4149 | { | |
4150 | dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; | |
4151 | ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); | |
4152 | ASSERT3P(db->db_user, !=, NULL); | |
4153 | ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub); | |
4154 | atomic_sub_64(&db->db_user->dbu_size, nsub); | |
4155 | } | |
4156 | ||
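The dbu_size accounted by these two functions is what dbuf_rele_and_unlock() adds on top of db->db.db_size when charging a dbuf against the dbuf cache, so a user that hangs extra allocations off a dbuf should keep it current while the dbuf is held. A hedged sketch (hypothetical helper name; per the asserts above, the dbuf must not be sitting in the dbuf cache when its user size changes):

	/* Illustration only: re-account an attached payload whose size changed. */
	static void
	my_user_resize_payload(dmu_buf_t *db, uint64_t oldlen, uint64_t newlen)
	{
		if (oldlen != 0)
			dmu_buf_sub_user_size(db, oldlen);
		if (newlen != 0)
			dmu_buf_add_user_size(db, newlen);
	}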
0c66c32d | 4157 | void |
493b6e56 | 4158 | dmu_buf_user_evict_wait(void) |
0c66c32d JG |
4159 | { |
4160 | taskq_wait(dbu_evict_taskq); | |
34dc7c2f BB |
4161 | } |
4162 | ||
03c6040b GW |
4163 | blkptr_t * |
4164 | dmu_buf_get_blkptr(dmu_buf_t *db) | |
4165 | { | |
4166 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
4167 | return (dbi->db_blkptr); | |
4168 | } | |
4169 | ||
8bea9815 MA |
4170 | objset_t * |
4171 | dmu_buf_get_objset(dmu_buf_t *db) | |
4172 | { | |
4173 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
4174 | return (dbi->db_objset); | |
4175 | } | |
4176 | ||
2bce8049 MA |
4177 | dnode_t * |
4178 | dmu_buf_dnode_enter(dmu_buf_t *db) | |
4179 | { | |
4180 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
4181 | DB_DNODE_ENTER(dbi); | |
4182 | return (DB_DNODE(dbi)); | |
4183 | } | |
4184 | ||
4185 | void | |
4186 | dmu_buf_dnode_exit(dmu_buf_t *db) | |
4187 | { | |
4188 | dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; | |
4189 | DB_DNODE_EXIT(dbi); | |
4190 | } | |
4191 | ||
34dc7c2f BB |
4192 | static void |
4193 | dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) | |
4194 | { | |
4195 | /* ASSERT(dmu_tx_is_syncing(tx)) */ | |
4196 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
4197 | ||
4198 | if (db->db_blkptr != NULL) | |
4199 | return; | |
4200 | ||
428870ff | 4201 | if (db->db_blkid == DMU_SPILL_BLKID) { |
50c957f7 | 4202 | db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); |
428870ff BB |
4203 | BP_ZERO(db->db_blkptr); |
4204 | return; | |
4205 | } | |
34dc7c2f BB |
4206 | if (db->db_level == dn->dn_phys->dn_nlevels-1) { |
4207 | /* | |
4208 | * This buffer was allocated at a time when there were | |
4209 | * no blkptrs available from the dnode, or it was | |
e1cfd73f | 4210 | * inappropriate to hook it in (i.e., nlevels mismatch). |
34dc7c2f BB |
4211 | */ |
4212 | ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); | |
4213 | ASSERT(db->db_parent == NULL); | |
4214 | db->db_parent = dn->dn_dbuf; | |
4215 | db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; | |
4216 | DBUF_VERIFY(db); | |
4217 | } else { | |
4218 | dmu_buf_impl_t *parent = db->db_parent; | |
4219 | int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
4220 | ||
4221 | ASSERT(dn->dn_phys->dn_nlevels > 1); | |
4222 | if (parent == NULL) { | |
4223 | mutex_exit(&db->db_mtx); | |
4224 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
fcff0f35 PD |
4225 | parent = dbuf_hold_level(dn, db->db_level + 1, |
4226 | db->db_blkid >> epbs, db); | |
34dc7c2f BB |
4227 | rw_exit(&dn->dn_struct_rwlock); |
4228 | mutex_enter(&db->db_mtx); | |
4229 | db->db_parent = parent; | |
4230 | } | |
4231 | db->db_blkptr = (blkptr_t *)parent->db.db_data + | |
4232 | (db->db_blkid & ((1ULL << epbs) - 1)); | |
4233 | DBUF_VERIFY(db); | |
4234 | } | |
4235 | } | |
4236 | ||
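The parent/child indexing used above (and again in dbuf_lightweight_bp() further down) is plain shift-and-mask arithmetic on epbs, the log2 number of block pointers per indirect block. A worked example with assumed, typical values:

	/*
	 * Assumed values (not taken from this file):
	 *   dn_indblkshift = 17 (128 KiB indirect blocks), SPA_BLKPTRSHIFT = 7,
	 *   so epbs = 17 - 7 = 10 and each indirect holds 1 << 10 = 1024 blkptrs.
	 * For a level-0 dbuf with db_blkid = 3000:
	 *   parent (level-1) blkid = 3000 >> 10 = 2
	 *   slot within the parent = 3000 & ((1ULL << 10) - 1) = 952
	 * so db_blkptr ends up pointing at blkptr 952 of that parent's db_data.
	 */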
fa3922df MM |
4237 | static void |
4238 | dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx) | |
4239 | { | |
4240 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4241 | void *data = dr->dt.dl.dr_data; | |
4242 | ||
4243 | ASSERT0(db->db_level); | |
4244 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
fa3922df MM |
4245 | ASSERT(db->db_blkid == DMU_BONUS_BLKID); |
4246 | ASSERT(data != NULL); | |
4247 | ||
ba67d821 | 4248 | dnode_t *dn = dr->dr_dnode; |
fa3922df MM |
4249 | ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, |
4250 | DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); | |
861166b0 | 4251 | memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys)); |
fa3922df MM |
4252 | |
4253 | dbuf_sync_leaf_verify_bonus_dnode(dr); | |
4254 | ||
4255 | dbuf_undirty_bonus(dr); | |
4256 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); | |
4257 | } | |
4258 | ||
b5256303 | 4259 | /* |
0c03d21a MA |
4260 | * When syncing out a block of dnodes, adjust the block to deal with | |
4261 | * encryption. Normally, we make sure the block is decrypted before writing | |
4262 | * it. If we have crypt params, then we are writing a raw (encrypted) block | |
4263 | * from a raw receive. In this case, set the ARC buf's crypt params so | |
4264 | * that the BP will be filled with the correct byteorder, salt, iv, and mac. | |
b5256303 TC |
4265 | */ |
4266 | static void | |
0c03d21a | 4267 | dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) |
b5256303 TC |
4268 | { |
4269 | int err; | |
4270 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4271 | ||
4272 | ASSERT(MUTEX_HELD(&db->db_mtx)); | |
0c03d21a MA |
4273 | ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); |
4274 | ASSERT3U(db->db_level, ==, 0); | |
b5256303 | 4275 | |
0c03d21a | 4276 | if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { |
a2c2ed1b TC |
4277 | zbookmark_phys_t zb; |
4278 | ||
b5256303 TC |
4279 | /* |
4280 | * Unfortunately, there is currently no mechanism for | |
4281 | * syncing context to handle decryption errors. An error | |
4282 | * here is only possible if an attacker maliciously | |
4283 | * changed a dnode block and updated the associated | |
4284 | * checksums going up the block tree. | |
4285 | */ | |
a2c2ed1b TC |
4286 | SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), |
4287 | db->db.db_object, db->db_level, db->db_blkid); | |
b5256303 | 4288 | err = arc_untransform(db->db_buf, db->db_objset->os_spa, |
a2c2ed1b | 4289 | &zb, B_TRUE); |
b5256303 TC |
4290 | if (err) |
4291 | panic("Invalid dnode block MAC"); | |
0c03d21a MA |
4292 | } else if (dr->dt.dl.dr_has_raw_params) { |
4293 | (void) arc_release(dr->dt.dl.dr_data, db); | |
4294 | arc_convert_to_raw(dr->dt.dl.dr_data, | |
4295 | dmu_objset_id(db->db_objset), | |
4296 | dr->dt.dl.dr_byteorder, DMU_OT_DNODE, | |
4297 | dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); | |
b5256303 TC |
4298 | } |
4299 | } | |
4300 | ||
d1d7e268 MK |
4301 | /* |
4302 | * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it | |
60948de1 BB |
4303 | * is critical that we not allow the compiler to inline this function into | |
4304 | * dbuf_sync_list(), thereby drastically bloating the stack usage. | |
4305 | */ | |
4306 | noinline static void | |
34dc7c2f BB |
4307 | dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
4308 | { | |
4309 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
ba67d821 | 4310 | dnode_t *dn = dr->dr_dnode; |
34dc7c2f BB |
4311 | |
4312 | ASSERT(dmu_tx_is_syncing(tx)); | |
4313 | ||
4314 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
4315 | ||
4316 | mutex_enter(&db->db_mtx); | |
4317 | ||
4318 | ASSERT(db->db_level > 0); | |
4319 | DBUF_VERIFY(db); | |
4320 | ||
e49f1e20 | 4321 | /* Read the block if it hasn't been read yet. */ |
34dc7c2f BB |
4322 | if (db->db_buf == NULL) { |
4323 | mutex_exit(&db->db_mtx); | |
4324 | (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); | |
4325 | mutex_enter(&db->db_mtx); | |
4326 | } | |
4327 | ASSERT3U(db->db_state, ==, DB_CACHED); | |
34dc7c2f BB |
4328 | ASSERT(db->db_buf != NULL); |
4329 | ||
e49f1e20 | 4330 | /* Indirect block size must match what the dnode thinks it is. */ |
572e2857 | 4331 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
34dc7c2f BB |
4332 | dbuf_check_blkptr(dn, db); |
4333 | ||
e49f1e20 | 4334 | /* Provide the pending dirty record to child dbufs */ |
34dc7c2f BB |
4335 | db->db_data_pending = dr; |
4336 | ||
34dc7c2f | 4337 | mutex_exit(&db->db_mtx); |
a1d477c2 | 4338 | |
b128c09f | 4339 | dbuf_write(dr, db->db_buf, tx); |
34dc7c2f | 4340 | |
ba67d821 | 4341 | zio_t *zio = dr->dr_zio; |
34dc7c2f | 4342 | mutex_enter(&dr->dt.di.dr_mtx); |
4bda3bd0 | 4343 | dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); |
34dc7c2f BB |
4344 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
4345 | mutex_exit(&dr->dt.di.dr_mtx); | |
4346 | zio_nowait(zio); | |
4347 | } | |
4348 | ||
0e37a0f4 SD |
4349 | /* |
4350 | * Verify that the size of the data in our bonus buffer does not exceed | |
4351 | * its recorded size. | |
4352 | * | |
4353 | * The purpose of this verification is to catch any cases in development | |
4354 | * where the size of a phys structure (e.g., space_map_phys_t) grows and, | |
4355 | * due to incorrect feature management, older pools expect to read more | |
4356 | * data even though they didn't actually write it to begin with. | |
4357 | * | |
4358 | * For example, this would catch an error in the feature logic where we | |
4359 | * open an older pool and we expect to write the space map histogram of | |
4360 | * a space map with size SPACE_MAP_SIZE_V0. | |
4361 | */ | |
4362 | static void | |
4363 | dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr) | |
4364 | { | |
fa3922df | 4365 | #ifdef ZFS_DEBUG |
ba67d821 | 4366 | dnode_t *dn = dr->dr_dnode; |
0e37a0f4 SD |
4367 | |
4368 | /* | |
4369 | * Encrypted bonus buffers can have data past their bonuslen. | |
4370 | * Skip the verification of these blocks. | |
4371 | */ | |
4372 | if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) | |
4373 | return; | |
4374 | ||
4375 | uint16_t bonuslen = dn->dn_phys->dn_bonuslen; | |
4376 | uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); | |
4377 | ASSERT3U(bonuslen, <=, maxbonuslen); | |
4378 | ||
4379 | arc_buf_t *datap = dr->dt.dl.dr_data; | |
4380 | char *datap_end = ((char *)datap) + bonuslen; | |
4381 | char *datap_max = ((char *)datap) + maxbonuslen; | |
4382 | ||
4383 | /* ensure that everything is zero after our data */ | |
4384 | for (; datap_end < datap_max; datap_end++) | |
4385 | ASSERT(*datap_end == 0); | |
0e37a0f4 | 4386 | #endif |
fa3922df | 4387 | } |
0e37a0f4 | 4388 | |
ba67d821 MA |
4389 | static blkptr_t * |
4390 | dbuf_lightweight_bp(dbuf_dirty_record_t *dr) | |
4391 | { | |
4392 | /* This must be a lightweight dirty record. */ | |
4393 | ASSERT3P(dr->dr_dbuf, ==, NULL); | |
4394 | dnode_t *dn = dr->dr_dnode; | |
4395 | ||
4396 | if (dn->dn_phys->dn_nlevels == 1) { | |
4397 | VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr); | |
4398 | return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]); | |
4399 | } else { | |
4400 | dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf; | |
4401 | int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
4402 | VERIFY3U(parent_db->db_level, ==, 1); | |
4403 | VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn); | |
4404 | VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid); | |
4405 | blkptr_t *bp = parent_db->db.db_data; | |
4406 | return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]); | |
4407 | } | |
4408 | } | |
4409 | ||
4410 | static void | |
4411 | dbuf_lightweight_ready(zio_t *zio) | |
4412 | { | |
4413 | dbuf_dirty_record_t *dr = zio->io_private; | |
4414 | blkptr_t *bp = zio->io_bp; | |
4415 | ||
4416 | if (zio->io_error != 0) | |
4417 | return; | |
4418 | ||
4419 | dnode_t *dn = dr->dr_dnode; | |
4420 | ||
4421 | blkptr_t *bp_orig = dbuf_lightweight_bp(dr); | |
4422 | spa_t *spa = dmu_objset_spa(dn->dn_objset); | |
4423 | int64_t delta = bp_get_dsize_sync(spa, bp) - | |
4424 | bp_get_dsize_sync(spa, bp_orig); | |
4425 | dnode_diduse_space(dn, delta); | |
4426 | ||
4427 | uint64_t blkid = dr->dt.dll.dr_blkid; | |
4428 | mutex_enter(&dn->dn_mtx); | |
4429 | if (blkid > dn->dn_phys->dn_maxblkid) { | |
4430 | ASSERT0(dn->dn_objset->os_raw_receive); | |
4431 | dn->dn_phys->dn_maxblkid = blkid; | |
4432 | } | |
4433 | mutex_exit(&dn->dn_mtx); | |
4434 | ||
4435 | if (!BP_IS_EMBEDDED(bp)) { | |
4436 | uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1; | |
4437 | BP_SET_FILL(bp, fill); | |
4438 | } | |
4439 | ||
4440 | dmu_buf_impl_t *parent_db; | |
4441 | EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1); | |
4442 | if (dr->dr_parent == NULL) { | |
4443 | parent_db = dn->dn_dbuf; | |
4444 | } else { | |
4445 | parent_db = dr->dr_parent->dr_dbuf; | |
4446 | } | |
4447 | rw_enter(&parent_db->db_rwlock, RW_WRITER); | |
4448 | *bp_orig = *bp; | |
4449 | rw_exit(&parent_db->db_rwlock); | |
4450 | } | |
4451 | ||
ba67d821 MA |
4452 | static void |
4453 | dbuf_lightweight_done(zio_t *zio) | |
4454 | { | |
4455 | dbuf_dirty_record_t *dr = zio->io_private; | |
4456 | ||
4457 | VERIFY0(zio->io_error); | |
4458 | ||
4459 | objset_t *os = dr->dr_dnode->dn_objset; | |
4460 | dmu_tx_t *tx = os->os_synctx; | |
4461 | ||
4462 | if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { | |
4463 | ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); | |
4464 | } else { | |
4465 | dsl_dataset_t *ds = os->os_dsl_dataset; | |
4466 | (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE); | |
4467 | dsl_dataset_block_born(ds, zio->io_bp, tx); | |
4468 | } | |
4469 | ||
ccec7fbe AM |
4470 | dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, |
4471 | zio->io_txg); | |
ba67d821 MA |
4472 | |
4473 | abd_free(dr->dt.dll.dr_abd); | |
4474 | kmem_free(dr, sizeof (*dr)); | |
4475 | } | |
4476 | ||
4477 | noinline static void | |
4478 | dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx) | |
4479 | { | |
4480 | dnode_t *dn = dr->dr_dnode; | |
4481 | zio_t *pio; | |
4482 | if (dn->dn_phys->dn_nlevels == 1) { | |
4483 | pio = dn->dn_zio; | |
4484 | } else { | |
4485 | pio = dr->dr_parent->dr_zio; | |
4486 | } | |
4487 | ||
4488 | zbookmark_phys_t zb = { | |
4489 | .zb_objset = dmu_objset_id(dn->dn_objset), | |
4490 | .zb_object = dn->dn_object, | |
4491 | .zb_level = 0, | |
4492 | .zb_blkid = dr->dt.dll.dr_blkid, | |
4493 | }; | |
4494 | ||
4495 | /* | |
4496 | * See comment in dbuf_write(). This is so that zio->io_bp_orig | |
4497 | * will have the old BP in dbuf_lightweight_done(). | |
4498 | */ | |
4499 | dr->dr_bp_copy = *dbuf_lightweight_bp(dr); | |
4500 | ||
4501 | dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset), | |
4502 | dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd, | |
4503 | dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd), | |
4504 | &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL, | |
ccec7fbe | 4505 | dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE, |
ba67d821 MA |
4506 | ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb); |
4507 | ||
4508 | zio_nowait(dr->dr_zio); | |
4509 | } | |
4510 | ||
d1d7e268 MK |
4511 | /* |
4512 | * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is | |
60948de1 BB |
4513 | * critical that we not allow the compiler to inline this function into | |
4514 | * dbuf_sync_list(), thereby drastically bloating the stack usage. | |
4515 | */ | |
4516 | noinline static void | |
34dc7c2f BB |
4517 | dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) |
4518 | { | |
4519 | arc_buf_t **datap = &dr->dt.dl.dr_data; | |
4520 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
ba67d821 | 4521 | dnode_t *dn = dr->dr_dnode; |
572e2857 | 4522 | objset_t *os; |
34dc7c2f | 4523 | uint64_t txg = tx->tx_txg; |
34dc7c2f BB |
4524 | |
4525 | ASSERT(dmu_tx_is_syncing(tx)); | |
4526 | ||
4527 | dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); | |
4528 | ||
4529 | mutex_enter(&db->db_mtx); | |
4530 | /* | |
4531 | * To be synced, we must be dirtied. But we | |
4532 | * might have been freed after the dirty. | |
4533 | */ | |
4534 | if (db->db_state == DB_UNCACHED) { | |
4535 | /* This buffer has been freed since it was dirtied */ | |
4536 | ASSERT(db->db.db_data == NULL); | |
4537 | } else if (db->db_state == DB_FILL) { | |
4538 | /* This buffer was freed and is now being re-filled */ | |
4539 | ASSERT(db->db.db_data != dr->dt.dl.dr_data); | |
f6facd24 RN |
4540 | } else if (db->db_state == DB_READ) { |
4541 | /* | |
4542 | * This buffer has a clone we need to write, and an in-flight | |
4543 | * read on the BP we're about to clone. It's safe to issue the | |
4544 | * write here because the read has already been issued and the | |
4545 | * contents won't change. | |
4546 | */ | |
4547 | ASSERT(dr->dt.dl.dr_brtwrite && | |
4548 | dr->dt.dl.dr_override_state == DR_OVERRIDDEN); | |
34dc7c2f | 4549 | } else { |
b128c09f | 4550 | ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); |
34dc7c2f BB |
4551 | } |
4552 | DBUF_VERIFY(db); | |
4553 | ||
428870ff BB |
4554 | if (db->db_blkid == DMU_SPILL_BLKID) { |
4555 | mutex_enter(&dn->dn_mtx); | |
81edd3e8 P |
4556 | if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { |
4557 | /* | |
4558 | * In the previous transaction group, the bonus buffer | |
4559 | * was entirely used to store the attributes for the | |
4560 | * dnode which overrode the dn_spill field. However, | |
4561 | * when adding more attributes to the file a spill | |
4562 | * block was required to hold the extra attributes. | |
4563 | * | |
4564 | * Make sure to clear the garbage left in the dn_spill | |
4565 | * field from the previous attributes in the bonus | |
4566 | * buffer. Otherwise, after writing out the spill | |
4567 | * block to the newly allocated dva, it will free | |
4568 | * the old block pointed to by the invalid dn_spill. | |
4569 | */ | |
4570 | db->db_blkptr = NULL; | |
4571 | } | |
428870ff BB |
4572 | dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; |
4573 | mutex_exit(&dn->dn_mtx); | |
4574 | } | |
4575 | ||
34dc7c2f BB |
4576 | /* |
4577 | * If this is a bonus buffer, simply copy the bonus data into the | |
4578 | * dnode. It will be written out when the dnode is synced (and it | |
4579 | * will be synced, since it must have been dirty for dbuf_sync to | |
4580 | * be called). | |
4581 | */ | |
428870ff | 4582 | if (db->db_blkid == DMU_BONUS_BLKID) { |
428870ff | 4583 | ASSERT(dr->dr_dbuf == db); |
fa3922df | 4584 | dbuf_sync_bonus(dr, tx); |
34dc7c2f BB |
4585 | return; |
4586 | } | |
4587 | ||
572e2857 BB |
4588 | os = dn->dn_objset; |
4589 | ||
34dc7c2f BB |
4590 | /* |
4591 | * This function may have dropped the db_mtx lock allowing a dmu_sync | |
4592 | * operation to sneak in. As a result, we need to ensure that we | |
4593 | * don't check the dr_override_state until we have returned from | |
4594 | * dbuf_check_blkptr. | |
4595 | */ | |
4596 | dbuf_check_blkptr(dn, db); | |
4597 | ||
4598 | /* | |
572e2857 | 4599 | * If this buffer is in the middle of an immediate write, |
34dc7c2f BB |
4600 | * wait for the synchronous IO to complete. |
4601 | */ | |
4602 | while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { | |
4603 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); | |
4604 | cv_wait(&db->db_changed, &db->db_mtx); | |
34dc7c2f BB |
4605 | } |
4606 | ||
b5256303 TC |
4607 | /* |
4608 | * If this is a dnode block, ensure it is appropriately encrypted | |
4609 | * or decrypted, depending on what we are writing to it this txg. | |
4610 | */ | |
4611 | if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) | |
0c03d21a | 4612 | dbuf_prepare_encrypted_dnode_leaf(dr); |
b5256303 | 4613 | |
9babb374 BB |
4614 | if (db->db_state != DB_NOFILL && |
4615 | dn->dn_object != DMU_META_DNODE_OBJECT && | |
424fd7c3 | 4616 | zfs_refcount_count(&db->db_holds) > 1 && |
428870ff | 4617 | dr->dt.dl.dr_override_state != DR_OVERRIDDEN && |
9babb374 BB |
4618 | *datap == db->db_buf) { |
4619 | /* | |
4620 | * If this buffer is currently "in use" (i.e., there | |
4621 | * are active holds and db_data still references it), | |
4622 | * then make a copy before we start the write so that | |
4623 | * any modifications from the open txg will not leak | |
4624 | * into this write. | |
4625 | * | |
4626 | * NOTE: this copy does not need to be made for | |
4627 | * objects only modified in the syncing context (e.g. | |
4628 | * DNODE blocks). | |
4629 | */ | |
a81b8124 AR |
4630 | int psize = arc_buf_size(*datap); |
4631 | int lsize = arc_buf_lsize(*datap); | |
4632 | arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); | |
4633 | enum zio_compress compress_type = arc_get_compression(*datap); | |
4634 | uint8_t complevel = arc_get_complevel(*datap); | |
4635 | ||
4636 | if (arc_is_encrypted(*datap)) { | |
4637 | boolean_t byteorder; | |
4638 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
4639 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
4640 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
4641 | ||
4642 | arc_get_raw_params(*datap, &byteorder, salt, iv, mac); | |
4643 | *datap = arc_alloc_raw_buf(os->os_spa, db, | |
4644 | dmu_objset_id(os), byteorder, salt, iv, mac, | |
4645 | dn->dn_type, psize, lsize, compress_type, | |
4646 | complevel); | |
4647 | } else if (compress_type != ZIO_COMPRESS_OFF) { | |
4648 | ASSERT3U(type, ==, ARC_BUFC_DATA); | |
4649 | *datap = arc_alloc_compressed_buf(os->os_spa, db, | |
4650 | psize, lsize, compress_type, complevel); | |
4651 | } else { | |
4652 | *datap = arc_alloc_buf(os->os_spa, db, type, psize); | |
4653 | } | |
861166b0 | 4654 | memcpy((*datap)->b_data, db->db.db_data, psize); |
b128c09f | 4655 | } |
34dc7c2f BB |
4656 | db->db_data_pending = dr; |
4657 | ||
4658 | mutex_exit(&db->db_mtx); | |
4659 | ||
b128c09f | 4660 | dbuf_write(dr, *datap, tx); |
34dc7c2f BB |
4661 | |
4662 | ASSERT(!list_link_active(&dr->dr_dirty_node)); | |
572e2857 | 4663 | if (dn->dn_object == DMU_META_DNODE_OBJECT) { |
3fa93bb8 | 4664 | list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr); |
572e2857 | 4665 | } else { |
34dc7c2f | 4666 | zio_nowait(dr->dr_zio); |
572e2857 | 4667 | } |
34dc7c2f BB |
4668 | } |
4669 | ||
3bd4df38 EN |
4670 | /* |
4671 | * Syncs out a range of dirty records for indirect or leaf dbufs. May be | |
4672 | * called recursively from dbuf_sync_indirect(). | |
4673 | */ | |
34dc7c2f | 4674 | void |
4bda3bd0 | 4675 | dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) |
34dc7c2f BB |
4676 | { |
4677 | dbuf_dirty_record_t *dr; | |
4678 | ||
c65aa5b2 | 4679 | while ((dr = list_head(list))) { |
34dc7c2f BB |
4680 | if (dr->dr_zio != NULL) { |
4681 | /* | |
4682 | * If we find an already initialized zio then we | |
4683 | * are processing the meta-dnode, and we have finished. | |
4684 | * The dbufs for all dnodes are put back on the list | |
4685 | * during processing, so that we can zio_wait() | |
4686 | * these IOs after initiating all child IOs. | |
4687 | */ | |
4688 | ASSERT3U(dr->dr_dbuf->db.db_object, ==, | |
4689 | DMU_META_DNODE_OBJECT); | |
4690 | break; | |
4691 | } | |
4692 | list_remove(list, dr); | |
ba67d821 MA |
4693 | if (dr->dr_dbuf == NULL) { |
4694 | dbuf_sync_lightweight(dr, tx); | |
4695 | } else { | |
4696 | if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && | |
4697 | dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { | |
4698 | VERIFY3U(dr->dr_dbuf->db_level, ==, level); | |
4699 | } | |
4700 | if (dr->dr_dbuf->db_level > 0) | |
4701 | dbuf_sync_indirect(dr, tx); | |
4702 | else | |
4703 | dbuf_sync_leaf(dr, tx); | |
4704 | } | |
34dc7c2f BB |
4705 | } |
4706 | } | |
4707 | ||
34dc7c2f BB |
4708 | static void |
4709 | dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
4710 | { | |
14e4e3cb | 4711 | (void) buf; |
34dc7c2f | 4712 | dmu_buf_impl_t *db = vdb; |
572e2857 | 4713 | dnode_t *dn; |
b128c09f | 4714 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 4715 | blkptr_t *bp_orig = &zio->io_bp_orig; |
428870ff BB |
4716 | spa_t *spa = zio->io_spa; |
4717 | int64_t delta; | |
34dc7c2f | 4718 | uint64_t fill = 0; |
428870ff | 4719 | int i; |
34dc7c2f | 4720 | |
463a8cfe AR |
4721 | ASSERT3P(db->db_blkptr, !=, NULL); |
4722 | ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); | |
b128c09f | 4723 | |
572e2857 BB |
4724 | DB_DNODE_ENTER(db); |
4725 | dn = DB_DNODE(db); | |
428870ff BB |
4726 | delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); |
4727 | dnode_diduse_space(dn, delta - zio->io_prev_space_delta); | |
4728 | zio->io_prev_space_delta = delta; | |
34dc7c2f | 4729 | |
b0bc7a84 MG |
4730 | if (bp->blk_birth != 0) { |
4731 | ASSERT((db->db_blkid != DMU_SPILL_BLKID && | |
4732 | BP_GET_TYPE(bp) == dn->dn_type) || | |
4733 | (db->db_blkid == DMU_SPILL_BLKID && | |
9b67f605 MA |
4734 | BP_GET_TYPE(bp) == dn->dn_bonustype) || |
4735 | BP_IS_EMBEDDED(bp)); | |
b0bc7a84 | 4736 | ASSERT(BP_GET_LEVEL(bp) == db->db_level); |
34dc7c2f BB |
4737 | } |
4738 | ||
4739 | mutex_enter(&db->db_mtx); | |
4740 | ||
428870ff BB |
4741 | #ifdef ZFS_DEBUG |
4742 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
428870ff | 4743 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
463a8cfe | 4744 | ASSERT(!(BP_IS_HOLE(bp)) && |
50c957f7 | 4745 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
428870ff BB |
4746 | } |
4747 | #endif | |
4748 | ||
34dc7c2f BB |
4749 | if (db->db_level == 0) { |
4750 | mutex_enter(&dn->dn_mtx); | |
428870ff | 4751 | if (db->db_blkid > dn->dn_phys->dn_maxblkid && |
69830602 TC |
4752 | db->db_blkid != DMU_SPILL_BLKID) { |
4753 | ASSERT0(db->db_objset->os_raw_receive); | |
34dc7c2f | 4754 | dn->dn_phys->dn_maxblkid = db->db_blkid; |
69830602 | 4755 | } |
34dc7c2f BB |
4756 | mutex_exit(&dn->dn_mtx); |
4757 | ||
4758 | if (dn->dn_type == DMU_OT_DNODE) { | |
50c957f7 NB |
4759 | i = 0; |
4760 | while (i < db->db.db_size) { | |
817b1b6e MA |
4761 | dnode_phys_t *dnp = |
4762 | (void *)(((char *)db->db.db_data) + i); | |
50c957f7 NB |
4763 | |
4764 | i += DNODE_MIN_SIZE; | |
4765 | if (dnp->dn_type != DMU_OT_NONE) { | |
34dc7c2f | 4766 | fill++; |
3095ca91 MA |
4767 | for (int j = 0; j < dnp->dn_nblkptr; |
4768 | j++) { | |
4769 | (void) zfs_blkptr_verify(spa, | |
4770 | &dnp->dn_blkptr[j], | |
4771 | BLK_CONFIG_SKIP, | |
4772 | BLK_VERIFY_HALT); | |
4773 | } | |
4774 | if (dnp->dn_flags & | |
4775 | DNODE_FLAG_SPILL_BLKPTR) { | |
4776 | (void) zfs_blkptr_verify(spa, | |
4777 | DN_SPILL_BLKPTR(dnp), | |
4778 | BLK_CONFIG_SKIP, | |
4779 | BLK_VERIFY_HALT); | |
4780 | } | |
50c957f7 NB |
4781 | i += dnp->dn_extra_slots * |
4782 | DNODE_MIN_SIZE; | |
4783 | } | |
34dc7c2f BB |
4784 | } |
4785 | } else { | |
b0bc7a84 MG |
4786 | if (BP_IS_HOLE(bp)) { |
4787 | fill = 0; | |
4788 | } else { | |
4789 | fill = 1; | |
4790 | } | |
34dc7c2f BB |
4791 | } |
4792 | } else { | |
b128c09f | 4793 | blkptr_t *ibp = db->db.db_data; |
34dc7c2f | 4794 | ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); |
b128c09f BB |
4795 | for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { |
4796 | if (BP_IS_HOLE(ibp)) | |
34dc7c2f | 4797 | continue; |
3095ca91 MA |
4798 | (void) zfs_blkptr_verify(spa, ibp, |
4799 | BLK_CONFIG_SKIP, BLK_VERIFY_HALT); | |
9b67f605 | 4800 | fill += BP_GET_FILL(ibp); |
34dc7c2f BB |
4801 | } |
4802 | } | |
572e2857 | 4803 | DB_DNODE_EXIT(db); |
34dc7c2f | 4804 | |
9b67f605 | 4805 | if (!BP_IS_EMBEDDED(bp)) |
b5256303 | 4806 | BP_SET_FILL(bp, fill); |
34dc7c2f BB |
4807 | |
4808 | mutex_exit(&db->db_mtx); | |
463a8cfe | 4809 | |
f664f1ee | 4810 | db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG); |
463a8cfe | 4811 | *db->db_blkptr = *bp; |
f664f1ee | 4812 | dmu_buf_unlock_parent(db, dblt, FTAG); |
34dc7c2f BB |
4813 | } |
4814 | ||
bc77ba73 PD |
4815 | /* |
4816 | * This function gets called just prior to running through the compression | |
4817 | * stage of the zio pipeline. If we're an indirect block comprised of only | |
4818 | * holes, then we want this indirect to be compressed away to a hole. In | |
4819 | * order to do that we must zero out any information about the holes that | |
4820 | * this indirect points to before we try to compress it. | |
4821 | */ | |
4822 | static void | |
4823 | dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | |
4824 | { | |
14e4e3cb | 4825 | (void) zio, (void) buf; |
bc77ba73 PD |
4826 | dmu_buf_impl_t *db = vdb; |
4827 | dnode_t *dn; | |
4828 | blkptr_t *bp; | |
721ed0ee | 4829 | unsigned int epbs, i; |
bc77ba73 PD |
4830 | |
4831 | ASSERT3U(db->db_level, >, 0); | |
4832 | DB_DNODE_ENTER(db); | |
4833 | dn = DB_DNODE(db); | |
4834 | epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
721ed0ee | 4835 | ASSERT3U(epbs, <, 31); |
bc77ba73 PD |
4836 | |
4837 | /* Determine if all our children are holes */ | |
3f93077b | 4838 | for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { |
bc77ba73 PD |
4839 | if (!BP_IS_HOLE(bp)) |
4840 | break; | |
4841 | } | |
4842 | ||
4843 | /* | |
4844 | * If all the children are holes, then zero them all out so that | |
4845 | * we may get compressed away. | |
4846 | */ | |
3f93077b | 4847 | if (i == 1ULL << epbs) { |
721ed0ee GM |
4848 | /* |
4849 | * We only found holes. Grab the rwlock to prevent | |
4850 | * anybody from reading the blocks we're about to | |
4851 | * zero out. | |
4852 | */ | |
f664f1ee | 4853 | rw_enter(&db->db_rwlock, RW_WRITER); |
861166b0 | 4854 | memset(db->db.db_data, 0, db->db.db_size); |
f664f1ee | 4855 | rw_exit(&db->db_rwlock); |
bc77ba73 PD |
4856 | } |
4857 | DB_DNODE_EXIT(db); | |
4858 | } | |
4859 | ||
34dc7c2f BB |
4860 | static void |
4861 | dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) | |
4862 | { | |
14e4e3cb | 4863 | (void) buf; |
34dc7c2f | 4864 | dmu_buf_impl_t *db = vdb; |
428870ff | 4865 | blkptr_t *bp_orig = &zio->io_bp_orig; |
b0bc7a84 MG |
4866 | blkptr_t *bp = db->db_blkptr; |
4867 | objset_t *os = db->db_objset; | |
4868 | dmu_tx_t *tx = os->os_synctx; | |
34dc7c2f | 4869 | |
c99c9001 | 4870 | ASSERT0(zio->io_error); |
428870ff BB |
4871 | ASSERT(db->db_blkptr == bp); |
4872 | ||
03c6040b GW |
4873 | /* |
4874 | * For nopwrites and rewrites we ensure that the bp matches our | |
4875 | * original and bypass all the accounting. | |
4876 | */ | |
4877 | if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { | |
428870ff BB |
4878 | ASSERT(BP_EQUAL(bp, bp_orig)); |
4879 | } else { | |
b0bc7a84 | 4880 | dsl_dataset_t *ds = os->os_dsl_dataset; |
428870ff BB |
4881 | (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); |
4882 | dsl_dataset_block_born(ds, bp, tx); | |
4883 | } | |
34dc7c2f BB |
4884 | |
4885 | mutex_enter(&db->db_mtx); | |
4886 | ||
428870ff BB |
4887 | DBUF_VERIFY(db); |
4888 | ||
ba67d821 MA |
4889 | dbuf_dirty_record_t *dr = db->db_data_pending; |
4890 | dnode_t *dn = dr->dr_dnode; | |
34dc7c2f | 4891 | ASSERT(!list_link_active(&dr->dr_dirty_node)); |
428870ff | 4892 | ASSERT(dr->dr_dbuf == db); |
cccbed9f MM |
4893 | ASSERT(list_next(&db->db_dirty_records, dr) == NULL); |
4894 | list_remove(&db->db_dirty_records, dr); | |
34dc7c2f | 4895 | |
428870ff BB |
4896 | #ifdef ZFS_DEBUG |
4897 | if (db->db_blkid == DMU_SPILL_BLKID) { | |
428870ff BB |
4898 | ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); |
4899 | ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && | |
50c957f7 | 4900 | db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); |
428870ff BB |
4901 | } |
4902 | #endif | |
4903 | ||
34dc7c2f | 4904 | if (db->db_level == 0) { |
428870ff | 4905 | ASSERT(db->db_blkid != DMU_BONUS_BLKID); |
34dc7c2f | 4906 | ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); |
b128c09f | 4907 | if (db->db_state != DB_NOFILL) { |
67a1b037 PJD |
4908 | if (dr->dt.dl.dr_data != NULL && |
4909 | dr->dt.dl.dr_data != db->db_buf) { | |
d3c2ae1c | 4910 | arc_buf_destroy(dr->dt.dl.dr_data, db); |
67a1b037 | 4911 | } |
b128c09f | 4912 | } |
34dc7c2f | 4913 | } else { |
34dc7c2f | 4914 | ASSERT(list_head(&dr->dt.di.dr_children) == NULL); |
b0bc7a84 | 4915 | ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); |
34dc7c2f | 4916 | if (!BP_IS_HOLE(db->db_blkptr)) { |
2a8ba608 MM |
4917 | int epbs __maybe_unused = dn->dn_phys->dn_indblkshift - |
4918 | SPA_BLKPTRSHIFT; | |
b0bc7a84 MG |
4919 | ASSERT3U(db->db_blkid, <=, |
4920 | dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); | |
34dc7c2f BB |
4921 | ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, |
4922 | db->db.db_size); | |
34dc7c2f BB |
4923 | } |
4924 | mutex_destroy(&dr->dt.di.dr_mtx); | |
4925 | list_destroy(&dr->dt.di.dr_children); | |
4926 | } | |
34dc7c2f BB |
4927 | |
4928 | cv_broadcast(&db->db_changed); | |
4929 | ASSERT(db->db_dirtycnt > 0); | |
4930 | db->db_dirtycnt -= 1; | |
4931 | db->db_data_pending = NULL; | |
3d503a76 | 4932 | dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); |
0f8ff49e | 4933 | |
ccec7fbe AM |
4934 | dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, |
4935 | zio->io_txg); | |
0f8ff49e SD |
4936 | |
4937 | kmem_free(dr, sizeof (dbuf_dirty_record_t)); | |
428870ff BB |
4938 | } |
4939 | ||
4940 | static void | |
4941 | dbuf_write_nofill_ready(zio_t *zio) | |
4942 | { | |
4943 | dbuf_write_ready(zio, NULL, zio->io_private); | |
4944 | } | |
4945 | ||
4946 | static void | |
4947 | dbuf_write_nofill_done(zio_t *zio) | |
4948 | { | |
4949 | dbuf_write_done(zio, NULL, zio->io_private); | |
4950 | } | |
4951 | ||
4952 | static void | |
4953 | dbuf_write_override_ready(zio_t *zio) | |
4954 | { | |
4955 | dbuf_dirty_record_t *dr = zio->io_private; | |
4956 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4957 | ||
4958 | dbuf_write_ready(zio, NULL, db); | |
4959 | } | |
4960 | ||
4961 | static void | |
4962 | dbuf_write_override_done(zio_t *zio) | |
4963 | { | |
4964 | dbuf_dirty_record_t *dr = zio->io_private; | |
4965 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
4966 | blkptr_t *obp = &dr->dt.dl.dr_overridden_by; | |
4967 | ||
4968 | mutex_enter(&db->db_mtx); | |
4969 | if (!BP_EQUAL(zio->io_bp, obp)) { | |
4970 | if (!BP_IS_HOLE(obp)) | |
4971 | dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); | |
4972 | arc_release(dr->dt.dl.dr_data, db); | |
4973 | } | |
34dc7c2f BB |
4974 | mutex_exit(&db->db_mtx); |
4975 | ||
428870ff | 4976 | dbuf_write_done(zio, NULL, db); |
a6255b7f DQ |
4977 | |
4978 | if (zio->io_abd != NULL) | |
e2af2acc | 4979 | abd_free(zio->io_abd); |
428870ff BB |
4980 | } |
4981 | ||
a1d477c2 MA |
4982 | typedef struct dbuf_remap_impl_callback_arg { |
4983 | objset_t *drica_os; | |
4984 | uint64_t drica_blk_birth; | |
4985 | dmu_tx_t *drica_tx; | |
4986 | } dbuf_remap_impl_callback_arg_t; | |
4987 | ||
4988 | static void | |
4989 | dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, | |
4990 | void *arg) | |
4991 | { | |
4992 | dbuf_remap_impl_callback_arg_t *drica = arg; | |
4993 | objset_t *os = drica->drica_os; | |
4994 | spa_t *spa = dmu_objset_spa(os); | |
4995 | dmu_tx_t *tx = drica->drica_tx; | |
4996 | ||
4997 | ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); | |
4998 | ||
4999 | if (os == spa_meta_objset(spa)) { | |
5000 | spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); | |
5001 | } else { | |
5002 | dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, | |
5003 | size, drica->drica_blk_birth, tx); | |
5004 | } | |
5005 | } | |
5006 | ||
5007 | static void | |
f664f1ee | 5008 | dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) |
a1d477c2 MA |
5009 | { |
5010 | blkptr_t bp_copy = *bp; | |
5011 | spa_t *spa = dmu_objset_spa(dn->dn_objset); | |
5012 | dbuf_remap_impl_callback_arg_t drica; | |
5013 | ||
5014 | ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); | |
5015 | ||
5016 | drica.drica_os = dn->dn_objset; | |
5017 | drica.drica_blk_birth = bp->blk_birth; | |
5018 | drica.drica_tx = tx; | |
5019 | if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, | |
5020 | &drica)) { | |
37f03da8 SH |
5021 | /* |
5022 | * If the blkptr being remapped is tracked by a livelist, | |
5023 | * then we need to make sure the livelist reflects the update. | |
5024 | * First, cancel out the old blkptr by appending a 'FREE' | |
5025 | * entry. Next, add an 'ALLOC' to track the new version. This | |
5026 | * way we avoid trying to free an inaccurate blkptr at delete. | |
5027 | * Note that embedded blkptrs are not tracked in livelists. | |
5028 | */ | |
5029 | if (dn->dn_objset != spa_meta_objset(spa)) { | |
5030 | dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); | |
5031 | if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && | |
5032 | bp->blk_birth > ds->ds_dir->dd_origin_txg) { | |
5033 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
5034 | ASSERT(dsl_dir_is_clone(ds->ds_dir)); | |
5035 | ASSERT(spa_feature_is_enabled(spa, | |
5036 | SPA_FEATURE_LIVELIST)); | |
5037 | bplist_append(&ds->ds_dir->dd_pending_frees, | |
5038 | bp); | |
5039 | bplist_append(&ds->ds_dir->dd_pending_allocs, | |
5040 | &bp_copy); | |
5041 | } | |
5042 | } | |
5043 | ||
a1d477c2 | 5044 | /* |
f664f1ee | 5045 | * The db_rwlock prevents dbuf_read_impl() from |
a1d477c2 MA |
5046 | * dereferencing the BP while we are changing it. To |
5047 | * avoid lock contention, only grab it when we are actually | |
5048 | * changing the BP. | |
5049 | */ | |
f664f1ee PD |
5050 | if (rw != NULL) |
5051 | rw_enter(rw, RW_WRITER); | |
a1d477c2 | 5052 | *bp = bp_copy; |
f664f1ee PD |
5053 | if (rw != NULL) |
5054 | rw_exit(rw); | |
a1d477c2 MA |
5055 | } |
5056 | } | |
5057 | ||
a1d477c2 MA |
5058 | /* |
5059 | * Remap any existing BPs to concrete vdevs, if possible. | |
5060 | */ | |
5061 | static void | |
5062 | dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) | |
5063 | { | |
5064 | spa_t *spa = dmu_objset_spa(db->db_objset); | |
5065 | ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); | |
5066 | ||
5067 | if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) | |
5068 | return; | |
5069 | ||
5070 | if (db->db_level > 0) { | |
5071 | blkptr_t *bp = db->db.db_data; | |
5072 | for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { | |
f664f1ee | 5073 | dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); |
a1d477c2 MA |
5074 | } |
5075 | } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { | |
5076 | dnode_phys_t *dnp = db->db.db_data; | |
5077 | ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, | |
5078 | DMU_OT_DNODE); | |
5079 | for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; | |
5080 | i += dnp[i].dn_extra_slots + 1) { | |
5081 | for (int j = 0; j < dnp[i].dn_nblkptr; j++) { | |
f664f1ee PD |
5082 | krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : |
5083 | &dn->dn_dbuf->db_rwlock); | |
5084 | dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, | |
5085 | tx); | |
a1d477c2 MA |
5086 | } |
5087 | } | |
5088 | } | |
5089 | } | |
5090 | ||
5091 | ||
3bd4df38 EN |
5092 | /* |
5093 | * Populate dr->dr_zio with a zio to commit a dirty buffer to disk. | |
5094 | * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio). | |
5095 | */ | |
428870ff BB |
5096 | static void |
5097 | dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) | |
5098 | { | |
5099 | dmu_buf_impl_t *db = dr->dr_dbuf; | |
ba67d821 | 5100 | dnode_t *dn = dr->dr_dnode; |
572e2857 | 5101 | objset_t *os; |
428870ff BB |
5102 | dmu_buf_impl_t *parent = db->db_parent; |
5103 | uint64_t txg = tx->tx_txg; | |
5dbd68a3 | 5104 | zbookmark_phys_t zb; |
428870ff | 5105 | zio_prop_t zp; |
28caa74b | 5106 | zio_t *pio; /* parent I/O */ |
428870ff | 5107 | int wp_flag = 0; |
34dc7c2f | 5108 | |
463a8cfe AR |
5109 | ASSERT(dmu_tx_is_syncing(tx)); |
5110 | ||
572e2857 BB |
5111 | os = dn->dn_objset; |
5112 | ||
428870ff BB |
5113 | if (db->db_state != DB_NOFILL) { |
5114 | if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { | |
5115 | /* | |
5116 | * Private object buffers are released here rather | |
5117 | * than in dbuf_dirty() since they are only modified | |
5118 | * in the syncing context and we don't want the | |
5119 | * overhead of making multiple copies of the data. | |
5120 | */ | |
5121 | if (BP_IS_HOLE(db->db_blkptr)) { | |
5122 | arc_buf_thaw(data); | |
5123 | } else { | |
5124 | dbuf_release_bp(db); | |
5125 | } | |
a1d477c2 | 5126 | dbuf_remap(dn, db, tx); |
428870ff BB |
5127 | } |
5128 | } | |
5129 | ||
5130 | if (parent != dn->dn_dbuf) { | |
e49f1e20 WA |
5131 | /* Our parent is an indirect block. */ |
5132 | /* We have a dirty parent that has been scheduled for write. */ | |
428870ff | 5133 | ASSERT(parent && parent->db_data_pending); |
e49f1e20 | 5134 | /* Our parent's buffer is one level closer to the dnode. */ |
428870ff | 5135 | ASSERT(db->db_level == parent->db_level-1); |
e49f1e20 WA |
5136 | /* |
5137 | * We're about to modify our parent's db_data by modifying | |
5138 | * our block pointer, so the parent must be released. | |
5139 | */ | |
428870ff | 5140 | ASSERT(arc_released(parent->db_buf)); |
28caa74b | 5141 | pio = parent->db_data_pending->dr_zio; |
428870ff | 5142 | } else { |
e49f1e20 | 5143 | /* Our parent is the dnode itself. */ |
428870ff BB |
5144 | ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && |
5145 | db->db_blkid != DMU_SPILL_BLKID) || | |
5146 | (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); | |
5147 | if (db->db_blkid != DMU_SPILL_BLKID) | |
5148 | ASSERT3P(db->db_blkptr, ==, | |
5149 | &dn->dn_phys->dn_blkptr[db->db_blkid]); | |
28caa74b | 5150 | pio = dn->dn_zio; |
428870ff BB |
5151 | } |
5152 | ||
5153 | ASSERT(db->db_level == 0 || data == db->db_buf); | |
5154 | ASSERT3U(db->db_blkptr->blk_birth, <=, txg); | |
28caa74b | 5155 | ASSERT(pio); |
428870ff BB |
5156 | |
5157 | SET_BOOKMARK(&zb, os->os_dsl_dataset ? | |
5158 | os->os_dsl_dataset->ds_object : DMU_META_OBJSET, | |
5159 | db->db.db_object, db->db_level, db->db_blkid); | |
5160 | ||
5161 | if (db->db_blkid == DMU_SPILL_BLKID) | |
5162 | wp_flag = WP_SPILL; | |
5163 | wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; | |
5164 | ||
82644107 | 5165 | dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); |
428870ff | 5166 | |
463a8cfe AR |
5167 | /* |
5168 | * We copy the blkptr now (rather than when we instantiate the dirty | |
5169 | * record), because its value can change between open context and | |
5170 | * syncing context. We do not need to hold dn_struct_rwlock to read | |
5171 | * db_blkptr because we are in syncing context. | |
5172 | */ | |
5173 | dr->dr_bp_copy = *db->db_blkptr; | |
5174 | ||
9b67f605 MA |
5175 | if (db->db_level == 0 && |
5176 | dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { | |
5177 | /* | |
5178 | * The BP for this block has been provided by open context | |
5179 | * (by dmu_sync() or dmu_buf_write_embedded()). | |
5180 | */ | |
a6255b7f DQ |
5181 | abd_t *contents = (data != NULL) ? |
5182 | abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; | |
9b67f605 | 5183 | |
28caa74b MM |
5184 | dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy, |
5185 | contents, db->db.db_size, db->db.db_size, &zp, | |
ccec7fbe | 5186 | dbuf_write_override_ready, NULL, |
bc77ba73 | 5187 | dbuf_write_override_done, |
e8b96c60 | 5188 | dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); |
428870ff BB |
5189 | mutex_enter(&db->db_mtx); |
5190 | dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; | |
5191 | zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, | |
67a1b037 PJD |
5192 | dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite, |
5193 | dr->dt.dl.dr_brtwrite); | |
428870ff BB |
5194 | mutex_exit(&db->db_mtx); |
5195 | } else if (db->db_state == DB_NOFILL) { | |
3c67d83a TH |
5196 | ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || |
5197 | zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); | |
28caa74b | 5198 | dr->dr_zio = zio_write(pio, os->os_spa, txg, |
2aa34383 | 5199 | &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, |
ccec7fbe | 5200 | dbuf_write_nofill_ready, NULL, |
bc77ba73 | 5201 | dbuf_write_nofill_done, db, |
428870ff BB |
5202 | ZIO_PRIORITY_ASYNC_WRITE, |
5203 | ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); | |
5204 | } else { | |
5205 | ASSERT(arc_released(data)); | |
bc77ba73 PD |
5206 | |
5207 | /* | |
5208 | * For indirect blocks, we want to set up the children | |
5209 | * ready callback so that we can properly handle an indirect | |
5210 | * block that only contains holes. | |
5211 | */ | |
1c27024e | 5212 | arc_write_done_func_t *children_ready_cb = NULL; |
bc77ba73 PD |
5213 | if (db->db_level != 0) |
5214 | children_ready_cb = dbuf_write_children_ready; | |
5215 | ||
28caa74b | 5216 | dr->dr_zio = arc_write(pio, os->os_spa, txg, |
ed2f7ba0 AM |
5217 | &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db), |
5218 | dbuf_is_l2cacheable(db), &zp, dbuf_write_ready, | |
ccec7fbe AM |
5219 | children_ready_cb, dbuf_write_done, db, |
5220 | ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); | |
428870ff | 5221 | } |
34dc7c2f | 5222 | } |
c28b2279 | 5223 | |
8f576c23 BB |
5224 | EXPORT_SYMBOL(dbuf_find); |
5225 | EXPORT_SYMBOL(dbuf_is_metadata); | |
d3c2ae1c | 5226 | EXPORT_SYMBOL(dbuf_destroy); |
8f576c23 BB |
5227 | EXPORT_SYMBOL(dbuf_loan_arcbuf); |
5228 | EXPORT_SYMBOL(dbuf_whichblock); | |
5229 | EXPORT_SYMBOL(dbuf_read); | |
5230 | EXPORT_SYMBOL(dbuf_unoverride); | |
5231 | EXPORT_SYMBOL(dbuf_free_range); | |
5232 | EXPORT_SYMBOL(dbuf_new_size); | |
5233 | EXPORT_SYMBOL(dbuf_release_bp); | |
5234 | EXPORT_SYMBOL(dbuf_dirty); | |
0c03d21a | 5235 | EXPORT_SYMBOL(dmu_buf_set_crypt_params); |
c28b2279 | 5236 | EXPORT_SYMBOL(dmu_buf_will_dirty); |
a73e8fdb | 5237 | EXPORT_SYMBOL(dmu_buf_is_dirty); |
555ef90c | 5238 | EXPORT_SYMBOL(dmu_buf_will_clone); |
8f576c23 BB |
5239 | EXPORT_SYMBOL(dmu_buf_will_not_fill); |
5240 | EXPORT_SYMBOL(dmu_buf_will_fill); | |
5241 | EXPORT_SYMBOL(dmu_buf_fill_done); | |
4047414a | 5242 | EXPORT_SYMBOL(dmu_buf_rele); |
8f576c23 | 5243 | EXPORT_SYMBOL(dbuf_assign_arcbuf); |
8f576c23 BB |
5244 | EXPORT_SYMBOL(dbuf_prefetch); |
5245 | EXPORT_SYMBOL(dbuf_hold_impl); | |
5246 | EXPORT_SYMBOL(dbuf_hold); | |
5247 | EXPORT_SYMBOL(dbuf_hold_level); | |
5248 | EXPORT_SYMBOL(dbuf_create_bonus); | |
5249 | EXPORT_SYMBOL(dbuf_spill_set_blksz); | |
5250 | EXPORT_SYMBOL(dbuf_rm_spill); | |
5251 | EXPORT_SYMBOL(dbuf_add_ref); | |
5252 | EXPORT_SYMBOL(dbuf_rele); | |
5253 | EXPORT_SYMBOL(dbuf_rele_and_unlock); | |
5254 | EXPORT_SYMBOL(dbuf_refcount); | |
5255 | EXPORT_SYMBOL(dbuf_sync_list); | |
5256 | EXPORT_SYMBOL(dmu_buf_set_user); | |
5257 | EXPORT_SYMBOL(dmu_buf_set_user_ie); | |
8f576c23 | 5258 | EXPORT_SYMBOL(dmu_buf_get_user); |
0f699108 | 5259 | EXPORT_SYMBOL(dmu_buf_get_blkptr); |
d3c2ae1c | 5260 | |
ab8d9c17 | 5261 | ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW, |
02730c33 | 5262 | "Maximum size in bytes of the dbuf cache."); |
d3c2ae1c | 5263 | |
03fdcb9a | 5264 | ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW, |
7ada752a | 5265 | "Percentage over dbuf_cache_max_bytes for direct dbuf eviction."); |
d3c2ae1c | 5266 | |
03fdcb9a | 5267 | ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, |
7ada752a | 5268 | "Percentage below dbuf_cache_max_bytes when dbuf eviction stops."); |
d3c2ae1c | 5269 | |
ab8d9c17 | 5270 | ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW, |
7ada752a | 5271 | "Maximum size in bytes of dbuf metadata cache."); |
2e5dc449 | 5272 | |
fdc2d303 | 5273 | ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW, |
7ada752a | 5274 | "Set size of dbuf cache to log2 fraction of arc size."); |
2e5dc449 | 5275 | |
fdc2d303 | 5276 | ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW, |
7ada752a | 5277 | "Set size of dbuf metadata cache to log2 fraction of arc size."); |
505df8d1 BB |
5278 | |
5279 | ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD, | |
5280 | "Set size of dbuf cache mutex array as log2 shift."); |