/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

/*
 * Each of the concurrent object allocators will grab
 * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
 * grab 128 slots, which is 4 blocks worth.  This was experimentally
 * determined to be the lowest value that eliminates the measurable effect
 * of lock contention from this code path.
 */
int dmu_object_alloc_chunk_shift = 7;
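
/*
 * Illustrative arithmetic (added for clarity): the default shift of 7 gives
 * 2^7 = 128 dnode slots per chunk, i.e. 4 dnode blocks of 32 slots each
 * (the "4 blocks worth" mentioned above).
 */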

static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;
	uint64_t *cpuobj = NULL;
	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
	int error;

	kpreempt_disable();
	cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
	    os->os_obj_next_percpu_len];
	kpreempt_enable();

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}

	/*
	 * The "chunk" of dnodes that is assigned to a CPU-specific
	 * allocator needs to be at least one block's worth, to avoid
	 * lock contention on the dbuf.  It can be at most one L1 block's
	 * worth, so that the "rescan after polishing off an L1's worth"
	 * logic below will be sure to kick in.
	 */
	if (dnodes_per_chunk < DNODES_PER_BLOCK)
		dnodes_per_chunk = DNODES_PER_BLOCK;
	if (dnodes_per_chunk > L1_dnode_count)
		dnodes_per_chunk = L1_dnode_count;
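
	/*
	 * Illustrative bounds (added for clarity, assuming 32 dnode slots
	 * per dnode block and the 2^12-slot L1 described in the comment
	 * below): dnodes_per_chunk is clamped to the range [32, 4096], so
	 * the default of 128 passes through unchanged.
	 */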

	/*
	 * The caller requested the dnode be returned as a performance
	 * optimization in order to avoid releasing the hold only to
	 * immediately reacquire it.  Since the caller is responsible
	 * for releasing the hold they must provide the tag.
	 */
	if (allocated_dnode != NULL) {
		ASSERT3P(tag, !=, NULL);
	} else {
		ASSERT3P(tag, ==, NULL);
		tag = FTAG;
	}

	object = *cpuobj;
	for (;;) {
		/*
		 * If we finished a chunk of dnodes, get a new one from
		 * the global allocator.
		 */
		if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
		    (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
		    dn_slots)) {
			DNODE_STAT_BUMP(dnode_alloc_next_chunk);
			mutex_enter(&os->os_obj_lock);
			ASSERT0(P2PHASE(os->os_obj_next_chunk,
			    dnodes_per_chunk));
			object = os->os_obj_next_chunk;

			/*
			 * Each time we polish off an L1 bp worth of dnodes
			 * (2^12 objects), move to another L1 bp that's
			 * still reasonably sparse (at most 1/4 full).  Look
			 * from the beginning at most once per txg.  If we
			 * still can't allocate from that L1 block, search
			 * for an empty L0 block, which will quickly skip
			 * to the end of the metadnode if no nearby L0
			 * blocks are empty.  This fallback avoids a
			 * pathology where full dnode blocks containing
			 * large dnodes appear sparse because they have a
			 * low blk_fill, leading to many failed allocation
			 * attempts.  In the long term a better mechanism to
			 * search for sparse metadnode regions, such as
			 * spacemaps, could be implemented.
			 *
			 * os_rescan_dnodes is set during txg sync if enough
			 * objects have been freed since the previous
			 * rescan to justify backfilling again.
			 *
			 * Note that dmu_traverse depends on the behavior
			 * that we use multiple blocks of the dnode object
			 * before going back to reuse objects.  Any change
			 * to this algorithm should preserve that property
			 * or find another solution to the issues described
			 * in traverse_visitbp.
			 */
			if (P2PHASE(object, L1_dnode_count) == 0) {
				uint64_t offset;
				uint64_t blkfill;
				int minlvl;
				if (os->os_rescan_dnodes) {
					offset = 0;
					os->os_rescan_dnodes = B_FALSE;
				} else {
					offset = object << DNODE_SHIFT;
				}
				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0)
					object = offset >> DNODE_SHIFT;
			}

			/*
			 * Note: if "restarted", we may find an L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * The value of (*cpuobj) before adding dn_slots is the object
		 * ID assigned to us.  The value afterwards is the object ID
		 * assigned to whoever wants to do an allocation next.
		 */
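		/*
		 * Illustrative numbers (added for clarity, not from the
		 * original source): with dn_slots = 2 and *cpuobj = 130,
		 * atomic_add_64_nv() returns 132, so this thread is assigned
		 * object ID 130 and the next allocation from this CPU's
		 * chunk begins at 132.
		 */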
		object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, tag, &dn);
		if (error == 0) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
			/*
			 * Another thread could have allocated it; check
			 * again now that we have the struct lock.
			 */
			if (dn->dn_type == DMU_OT_NONE) {
				dnode_allocate(dn, ot, blocksize,
				    indirect_blockshift, bonustype,
				    bonuslen, dn_slots, tx);
				rw_exit(&dn->dn_struct_rwlock);
				dmu_tx_add_new_object(tx, dn);

				/*
				 * Caller requested the allocated dnode be
				 * returned and is responsible for the hold.
				 */
				if (allocated_dnode != NULL)
					*allocated_dnode = dn;
				else
					dnode_rele(dn, tag);

				return (object);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, tag);
			DNODE_STAT_BUMP(dnode_alloc_race);
		}

		/*
		 * Skip to next known valid starting point on error.  This
		 * is the start of the next block of dnodes.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
			DNODE_STAT_BUMP(dnode_alloc_next_block);
		}
		(void) atomic_swap_64(cpuobj, object);
	}
}

uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, 0, NULL, NULL, tx));
}

uint64_t
dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, 0, NULL, NULL, tx));
}

uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, dnodesize, NULL, NULL, tx));
}

/*
 * Allocate a new object and return a pointer to the newly allocated dnode
 * via the allocated_dnode argument.  The returned dnode will be held and
 * the caller is responsible for releasing the hold by calling dnode_rele().
 */
uint64_t
dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, dnodesize, allocated_dnode, tag, tx));
}
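
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * source; the object type, sizes, and error handling shown are hypothetical
 * and a valid open transaction is assumed):
 *
 *	dnode_t *dn;
 *	uint64_t obj = dmu_object_alloc_hold(os, DMU_OT_PLAIN_FILE_CONTENTS,
 *	    0, 0, DMU_OT_NONE, 0, 0, &dn, FTAG, tx);
 *	...use dn directly, without re-holding the object...
 *	dnode_rele(dn, FTAG);
 *
 * The caller owns the hold and must release it with dnode_rele() when done.
 */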

int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}

int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, DNODE_MIN_SIZE, B_FALSE, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    boolean_t keep_spill, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots,
	    keep_spill, tx);

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		dbuf_rm_spill(dn, tx);
		dnode_rm_spill(dn, tx);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);

	/*
	 * If we don't create this free range, we'll leak indirect blocks when
	 * we get to freeing the dnode in syncing context.
	 */
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);

	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	uint64_t start_obj;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int error;

	if (*objectp == 0) {
		start_obj = 1;
	} else if (ds && dsl_dataset_feature_is_active(ds,
	    SPA_FEATURE_LARGE_DNODE)) {
		uint64_t i = *objectp + 1;
		uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
		dmu_object_info_t doi;

		/*
		 * Scan through the remaining meta dnode block.  The contents
		 * of each slot in the block are known so it can be quickly
		 * checked.  If the block is exhausted without a match then
		 * hand off to dnode_next_offset() for further scanning.
		 */
		while (i <= last_obj) {
			error = dmu_object_info(os, i, &doi);
			if (error == ENOENT) {
				if (hole) {
					*objectp = i;
					return (0);
				} else {
					i++;
				}
			} else if (error == EEXIST) {
				i++;
			} else if (error == 0) {
				if (hole) {
					i += doi.doi_dnodesize >> DNODE_SHIFT;
				} else {
					*objectp = i;
					return (0);
				}
			} else {
				return (error);
			}
		}

		start_obj = i;
	} else {
		start_obj = *objectp + 1;
	}

	offset = start_obj << DNODE_SHIFT;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}
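
/*
 * Illustrative iteration sketch (added for clarity, not part of the original
 * source): starting from *objectp == 0 with hole == B_FALSE visits each
 * allocated object in the objset, stopping when dmu_object_next() returns
 * nonzero (e.g. ESRCH once the metadnode is exhausted):
 *
 *	uint64_t obj;
 *	int err;
 *	for (obj = 0; (err = dmu_object_next(os, &obj, B_FALSE, 0)) == 0; )
 *		...examine object obj...
 */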

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);

	/*
	 * We must initialize the ZAP data before changing the type,
	 * so that concurrent calls to *_is_zapified() can determine if
	 * the object has been completely zapified by checking the type.
	 */
	mzap_create_impl(dn, 0, 0, tx);

	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_object_alloc);
EXPORT_SYMBOL(dmu_object_alloc_ibs);
EXPORT_SYMBOL(dmu_object_alloc_dnsize);
EXPORT_SYMBOL(dmu_object_alloc_hold);
EXPORT_SYMBOL(dmu_object_claim);
EXPORT_SYMBOL(dmu_object_claim_dnsize);
EXPORT_SYMBOL(dmu_object_reclaim);
EXPORT_SYMBOL(dmu_object_reclaim_dnsize);
EXPORT_SYMBOL(dmu_object_rm_spill);
EXPORT_SYMBOL(dmu_object_free);
EXPORT_SYMBOL(dmu_object_next);
EXPORT_SYMBOL(dmu_object_zapify);
EXPORT_SYMBOL(dmu_object_free_zapified);

module_param(dmu_object_alloc_chunk_shift, int, 0644);
MODULE_PARM_DESC(dmu_object_alloc_chunk_shift,
	"CPU-specific allocator grabs 2^N objects at once");
#endif
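
/*
 * Tuning note (illustrative, not part of the original source; assumes the
 * Linux zfs.ko module): since the parameter is registered above with mode
 * 0644, it can be changed at runtime, e.g.:
 *
 *	echo 8 > /sys/module/zfs/parameters/dmu_object_alloc_chunk_shift
 */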