]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. | |
23 | * Copyright (c) 2011, 2018 by Delphix. All rights reserved. | |
24 | * Copyright 2011 Nexenta Systems, Inc. All rights reserved. | |
25 | * Copyright (c) 2012, Joyent, Inc. All rights reserved. | |
26 | * Copyright 2014 HybridCluster. All rights reserved. | |
27 | * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. | |
28 | * Copyright 2013 Saso Kiselkov. All rights reserved. | |
29 | * Copyright (c) 2017, Intel Corporation. | |
30 | */ | |
31 | ||
32 | /* Portions Copyright 2010 Robert Milkowski */ | |
33 | ||
34 | #ifndef _SYS_DMU_H | |
35 | #define _SYS_DMU_H | |
36 | ||
37 | /* | |
38 | * This file describes the interface that the DMU provides for its | |
39 | * consumers. | |
40 | * | |
41 | * The DMU also interacts with the SPA. That interface is described in | |
42 | * dmu_spa.h. | |
43 | */ | |
44 | ||
45 | #include <sys/zfs_context.h> | |
46 | #include <sys/inttypes.h> | |
47 | #include <sys/cred.h> | |
48 | #include <sys/fs/zfs.h> | |
49 | #include <sys/zio_compress.h> | |
50 | #include <sys/zio_priority.h> | |
51 | #include <sys/uio.h> | |
52 | ||
53 | #ifdef __cplusplus | |
54 | extern "C" { | |
55 | #endif | |
56 | ||
57 | struct page; | |
58 | struct vnode; | |
59 | struct spa; | |
60 | struct zilog; | |
61 | struct zio; | |
62 | struct blkptr; | |
63 | struct zap_cursor; | |
64 | struct dsl_dataset; | |
65 | struct dsl_pool; | |
66 | struct dnode; | |
67 | struct drr_begin; | |
68 | struct drr_end; | |
69 | struct zbookmark_phys; | |
70 | struct spa; | |
71 | struct nvlist; | |
72 | struct arc_buf; | |
73 | struct zio_prop; | |
74 | struct sa_handle; | |
75 | struct dsl_crypto_params; | |
76 | struct locked_range; | |
77 | ||
78 | typedef struct objset objset_t; | |
79 | typedef struct dmu_tx dmu_tx_t; | |
80 | typedef struct dsl_dir dsl_dir_t; | |
81 | typedef struct dnode dnode_t; | |
82 | ||
/*
 * Byteswap routine identifiers. The ordinal value of each entry is
 * stored on disk (in the low bits of a DMU_OT()-constructed object
 * type), so existing entries must never be reordered or removed.
 */
typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,	/* no swapping needed (byte array) */
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS	/* count of entries above; not a real type */
} dmu_object_byteswap_t;
103 | ||
/*
 * Bit layout of an object type byte constructed with DMU_OT():
 *
 *   0x80 DMU_OT_NEWTYPE       - set on all DMU_OT()-generated types;
 *                               distinguishes them from the legacy
 *                               dmu_object_type_t values (all < 0x80)
 *   0x40 DMU_OT_METADATA      - object contains metadata
 *   0x20 DMU_OT_ENCRYPTED     - object may be encrypted
 *   0x1f DMU_OT_BYTESWAP_MASK - dmu_object_byteswap_t index
 */
#define DMU_OT_NEWTYPE 0x80
#define DMU_OT_METADATA 0x40
#define DMU_OT_ENCRYPTED 0x20
#define DMU_OT_BYTESWAP_MASK 0x1f

/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t). All of the types created by this method
 * are cached in the dbuf metadata cache.
 */
#define DMU_OT(byteswap, metadata, encrypted) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((encrypted) ? DMU_OT_ENCRYPTED : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))

/*
 * A "new" type is valid if its byteswap index is in range; a legacy
 * type is valid if it is below DMU_OT_NUMTYPES.
 */
#define DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
	(ot) < DMU_OT_NUMTYPES)

/*
 * All DMU_OT()-generated types are dbuf-metadata-cached; legacy types
 * consult the dmu_ot table.
 */
#define DMU_OT_IS_METADATA_CACHED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	B_TRUE : dmu_ot[(ot)].ot_dbuf_metadata_cache)

/*
 * MDB doesn't have dmu_ot; it defines these macros itself.
 */
#ifndef ZFS_MDB
#define DMU_OT_IS_METADATA_IMPL(ot) (dmu_ot[ot].ot_metadata)
#define DMU_OT_IS_ENCRYPTED_IMPL(ot) (dmu_ot[ot].ot_encrypt)
#define DMU_OT_BYTESWAP_IMPL(ot) (dmu_ot[ot].ot_byteswap)
#endif

/* "New" types encode metadata-ness in the type byte; legacy types look it up. */
#define DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_METADATA) : \
	DMU_OT_IS_METADATA_IMPL(ot))

#define DMU_OT_IS_DDT(ot) \
	((ot) == DMU_OT_DDT_ZAP)

#define DMU_OT_IS_ZIL(ot) \
	((ot) == DMU_OT_INTENT_LOG)

/* Note: ztest uses DMU_OT_UINT64_OTHER as a proxy for file blocks */
#define DMU_OT_IS_FILE(ot) \
	((ot) == DMU_OT_PLAIN_FILE_CONTENTS || (ot) == DMU_OT_UINT64_OTHER)

#define DMU_OT_IS_ENCRYPTED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_ENCRYPTED) : \
	DMU_OT_IS_ENCRYPTED_IMPL(ot))

/*
 * These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
 * have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
 * is repurposed for embedded BPs.
 */
#define DMU_OT_HAS_FILL(ot) \
	((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)

/* Extract the byteswap index from a new-style type, or look it up for legacy. */
#define DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) : \
	DMU_OT_BYTESWAP_IMPL(ot))
166 | ||
/*
 * Legacy object types. Each enumerator's ordinal value is part of the
 * on-disk format, so entries must never be reordered or removed. The
 * trailing comment on each entry names its dmu_object_byteswap_t class.
 */
typedef enum dmu_object_type {
	DMU_OT_NONE,
	/* general: */
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPOBJ,			/* UINT64 */
	DMU_OT_BPOBJ_HDR,		/* UINT64 */
	/* spa: */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	/* zil: */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	/* dmu: */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	/* dsl: */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	/* zpl: */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	/* zvol: */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_DEADLIST,		/* ZAP */
	DMU_OT_DEADLIST_HDR,		/* UINT64 */
	DMU_OT_DSL_CLONES,		/* ZAP */
	DMU_OT_BPOBJ_SUBOBJ,		/* UINT64 */
	/*
	 * Do not allocate new object types here. Doing so makes the on-disk
	 * format incompatible with any other format that uses the same object
	 * type number.
	 *
	 * When creating an object which does not have one of the above types
	 * use the DMU_OTN_* type with the correct byteswap and metadata
	 * values.
	 *
	 * The DMU_OTN_* types do not have entries in the dmu_ot table,
	 * use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
	 * of indexing into dmu_ot directly (this works for both DMU_OT_* types
	 * and DMU_OTN_* types).
	 */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_FALSE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_FALSE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_FALSE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_FALSE),

	/* Encrypted variants of the above (DMU_OT_ENCRYPTED bit set). */
	DMU_OTN_UINT8_ENC_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_TRUE),
	DMU_OTN_UINT8_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_TRUE),
	DMU_OTN_UINT16_ENC_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_TRUE),
	DMU_OTN_UINT16_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_TRUE),
	DMU_OTN_UINT32_ENC_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_TRUE),
	DMU_OTN_UINT32_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_TRUE),
	DMU_OTN_UINT64_ENC_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_TRUE),
	DMU_OTN_UINT64_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_TRUE),
	DMU_OTN_ZAP_ENC_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_TRUE),
	DMU_OTN_ZAP_ENC_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_TRUE),
} dmu_object_type_t;
272 | ||
/*
 * These flags are intended to be used to specify the "txg_how"
 * parameter when calling the dmu_tx_assign() function. See the comment
 * above dmu_tx_assign() for more details on the meaning of these flags.
 */
#define TXG_NOWAIT (0ULL)	/* no flags set */
#define TXG_WAIT (1ULL<<0)
#define TXG_NOTHROTTLE (1ULL<<1)
281 | ||
282 | void byteswap_uint64_array(void *buf, size_t size); | |
283 | void byteswap_uint32_array(void *buf, size_t size); | |
284 | void byteswap_uint16_array(void *buf, size_t size); | |
285 | void byteswap_uint8_array(void *buf, size_t size); | |
286 | void zap_byteswap(void *buf, size_t size); | |
287 | void zfs_oldacl_byteswap(void *buf, size_t size); | |
288 | void zfs_acl_byteswap(void *buf, size_t size); | |
289 | void zfs_znode_byteswap(void *buf, size_t size); | |
290 | ||
/*
 * Dataset traversal flags; presumably consumed via the "flags" argument
 * of dmu_objset_find() below — confirm against the implementation.
 */
#define DS_FIND_SNAPSHOTS (1<<0)
#define DS_FIND_CHILDREN (1<<1)
#define DS_FIND_SERIALIZE (1<<2)

/*
 * The maximum number of bytes that can be accessed as part of one
 * operation, including metadata.
 */
#define DMU_MAX_ACCESS (64 * 1024 * 1024) /* 64MB */
#define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */

/*
 * Reserved "virtual" object numbers for per-user/group/project space
 * accounting. These are the all-ones end of the uint64_t object-number
 * space, outside the normal allocatable range.
 */
#define DMU_USERUSED_OBJECT (-1ULL)
#define DMU_GROUPUSED_OBJECT (-2ULL)
#define DMU_PROJECTUSED_OBJECT (-3ULL)

/*
 * Zap prefix for object accounting in DMU_{USER,GROUP,PROJECT}USED_OBJECT.
 */
#define DMU_OBJACCT_PREFIX "obj-"
#define DMU_OBJACCT_PREFIX_LEN 4	/* strlen(DMU_OBJACCT_PREFIX) */

/*
 * artificial blkids for bonus buffer and spill blocks
 */
#define DMU_BONUS_BLKID (-1ULL)
#define DMU_SPILL_BLKID (-2ULL)
317 | ||
318 | /* | |
319 | * Public routines to create, destroy, open, and close objsets. | |
320 | */ | |
321 | typedef void dmu_objset_create_sync_func_t(objset_t *os, void *arg, | |
322 | cred_t *cr, dmu_tx_t *tx); | |
323 | ||
324 | int dmu_objset_hold(const char *name, void *tag, objset_t **osp); | |
325 | int dmu_objset_own(const char *name, dmu_objset_type_t type, | |
326 | boolean_t readonly, boolean_t key_required, void *tag, objset_t **osp); | |
327 | void dmu_objset_rele(objset_t *os, void *tag); | |
328 | void dmu_objset_disown(objset_t *os, boolean_t key_required, void *tag); | |
329 | int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp); | |
330 | ||
331 | void dmu_objset_evict_dbufs(objset_t *os); | |
332 | int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, | |
333 | struct dsl_crypto_params *dcp, dmu_objset_create_sync_func_t func, | |
334 | void *arg); | |
335 | int dmu_objset_clone(const char *name, const char *origin); | |
336 | int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer, | |
337 | struct nvlist *errlist); | |
338 | int dmu_objset_snapshot_one(const char *fsname, const char *snapname); | |
339 | int dmu_objset_snapshot_tmp(const char *, const char *, int); | |
340 | int dmu_objset_find(char *name, int func(const char *, void *), void *arg, | |
341 | int flags); | |
342 | void dmu_objset_byteswap(void *buf, size_t size); | |
343 | int dsl_dataset_rename_snapshot(const char *fsname, | |
344 | const char *oldsnapname, const char *newsnapname, boolean_t recursive); | |
345 | int dmu_objset_remap_indirects(const char *fsname); | |
346 | ||
/*
 * Consumer-visible view of a held DMU buffer, as returned by the
 * dmu_buf_hold() family of functions below. Field layout is part of
 * the consumer ABI; do not reorder.
 */
typedef struct dmu_buf {
	uint64_t db_object;	/* object that this buffer is part of */
	uint64_t db_offset;	/* byte offset in this object */
	uint64_t db_size;	/* size of buffer in bytes */
	void *db_data;		/* data in buffer */
} dmu_buf_t;
353 | ||
354 | /* | |
355 | * The names of zap entries in the DIRECTORY_OBJECT of the MOS. | |
356 | */ | |
357 | #define DMU_POOL_DIRECTORY_OBJECT 1 | |
358 | #define DMU_POOL_CONFIG "config" | |
359 | #define DMU_POOL_FEATURES_FOR_WRITE "features_for_write" | |
360 | #define DMU_POOL_FEATURES_FOR_READ "features_for_read" | |
361 | #define DMU_POOL_FEATURE_DESCRIPTIONS "feature_descriptions" | |
362 | #define DMU_POOL_FEATURE_ENABLED_TXG "feature_enabled_txg" | |
363 | #define DMU_POOL_ROOT_DATASET "root_dataset" | |
364 | #define DMU_POOL_SYNC_BPOBJ "sync_bplist" | |
365 | #define DMU_POOL_ERRLOG_SCRUB "errlog_scrub" | |
366 | #define DMU_POOL_ERRLOG_LAST "errlog_last" | |
367 | #define DMU_POOL_SPARES "spares" | |
368 | #define DMU_POOL_DEFLATE "deflate" | |
369 | #define DMU_POOL_HISTORY "history" | |
370 | #define DMU_POOL_PROPS "pool_props" | |
371 | #define DMU_POOL_L2CACHE "l2cache" | |
372 | #define DMU_POOL_TMP_USERREFS "tmp_userrefs" | |
373 | #define DMU_POOL_DDT "DDT-%s-%s-%s" | |
374 | #define DMU_POOL_DDT_STATS "DDT-statistics" | |
375 | #define DMU_POOL_CREATION_VERSION "creation_version" | |
376 | #define DMU_POOL_SCAN "scan" | |
377 | #define DMU_POOL_FREE_BPOBJ "free_bpobj" | |
378 | #define DMU_POOL_BPTREE_OBJ "bptree_obj" | |
379 | #define DMU_POOL_EMPTY_BPOBJ "empty_bpobj" | |
380 | #define DMU_POOL_CHECKSUM_SALT "org.illumos:checksum_salt" | |
381 | #define DMU_POOL_VDEV_ZAP_MAP "com.delphix:vdev_zap_map" | |
382 | #define DMU_POOL_REMOVING "com.delphix:removing" | |
383 | #define DMU_POOL_OBSOLETE_BPOBJ "com.delphix:obsolete_bpobj" | |
384 | #define DMU_POOL_CONDENSING_INDIRECT "com.delphix:condensing_indirect" | |
385 | #define DMU_POOL_ZPOOL_CHECKPOINT "com.delphix:zpool_checkpoint" | |
386 | ||
387 | /* | |
388 | * Allocate an object from this objset. The range of object numbers | |
389 | * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode. | |
390 | * | |
391 | * The transaction must be assigned to a txg. The newly allocated | |
392 | * object will be "held" in the transaction (ie. you can modify the | |
393 | * newly allocated object in this transaction). | |
394 | * | |
395 | * dmu_object_alloc() chooses an object and returns it in *objectp. | |
396 | * | |
397 | * dmu_object_claim() allocates a specific object number. If that | |
398 | * number is already allocated, it fails and returns EEXIST. | |
399 | * | |
400 | * Return 0 on success, or ENOSPC or EEXIST as specified above. | |
401 | */ | |
402 | uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot, | |
403 | int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); | |
404 | uint64_t dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize, | |
405 | int indirect_blockshift, | |
406 | dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); | |
407 | uint64_t dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, | |
408 | int blocksize, dmu_object_type_t bonus_type, int bonus_len, | |
409 | int dnodesize, dmu_tx_t *tx); | |
410 | uint64_t dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot, | |
411 | int blocksize, int indirect_blockshift, dmu_object_type_t bonustype, | |
412 | int bonuslen, int dnodesize, dnode_t **allocated_dnode, void *tag, | |
413 | dmu_tx_t *tx); | |
414 | int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot, | |
415 | int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); | |
416 | int dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot, | |
417 | int blocksize, dmu_object_type_t bonus_type, int bonus_len, | |
418 | int dnodesize, dmu_tx_t *tx); | |
419 | int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot, | |
420 | int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp); | |
421 | int dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, | |
422 | dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype, | |
423 | int bonuslen, int dnodesize, dmu_tx_t *txp); | |
424 | ||
425 | /* | |
426 | * Free an object from this objset. | |
427 | * | |
428 | * The object's data will be freed as well (ie. you don't need to call | |
429 | * dmu_free(object, 0, -1, tx)). | |
430 | * | |
431 | * The object need not be held in the transaction. | |
432 | * | |
433 | * If there are any holds on this object's buffers (via dmu_buf_hold()), | |
434 | * or tx holds on the object (via dmu_tx_hold_object()), you can not | |
435 | * free it; it fails and returns EBUSY. | |
436 | * | |
437 | * If the object is not allocated, it fails and returns ENOENT. | |
438 | * | |
439 | * Return 0 on success, or EBUSY or ENOENT as specified above. | |
440 | */ | |
441 | int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx); | |
442 | ||
443 | /* | |
444 | * Find the next allocated or free object. | |
445 | * | |
446 | * The objectp parameter is in-out. It will be updated to be the next | |
447 | * object which is allocated. Ignore objects which have not been | |
448 | * modified since txg. | |
449 | * | |
450 | * XXX Can only be called on a objset with no dirty data. | |
451 | * | |
452 | * Returns 0 on success, or ENOENT if there are no more objects. | |
453 | */ | |
454 | int dmu_object_next(objset_t *os, uint64_t *objectp, | |
455 | boolean_t hole, uint64_t txg); | |
456 | ||
457 | /* | |
458 | * Set the number of levels on a dnode. nlevels must be greater than the | |
459 | * current number of levels or an EINVAL will be returned. | |
460 | */ | |
461 | int dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, | |
462 | dmu_tx_t *tx); | |
463 | ||
464 | /* | |
465 | * Set the data blocksize for an object. | |
466 | * | |
467 | * The object cannot have any blocks allcated beyond the first. If | |
468 | * the first block is allocated already, the new size must be greater | |
469 | * than the current block size. If these conditions are not met, | |
470 | * ENOTSUP will be returned. | |
471 | * | |
472 | * Returns 0 on success, or EBUSY if there are any holds on the object | |
473 | * contents, or ENOTSUP as described above. | |
474 | */ | |
475 | int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, | |
476 | int ibs, dmu_tx_t *tx); | |
477 | ||
478 | /* | |
479 | * Manually set the maxblkid on a dnode. This will adjust nlevels accordingly | |
480 | * to accommodate the change. | |
481 | */ | |
482 | int dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, | |
483 | dmu_tx_t *tx); | |
484 | ||
485 | /* | |
486 | * Set the checksum property on a dnode. The new checksum algorithm will | |
487 | * apply to all newly written blocks; existing blocks will not be affected. | |
488 | */ | |
489 | void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, | |
490 | dmu_tx_t *tx); | |
491 | ||
492 | /* | |
493 | * Set the compress property on a dnode. The new compression algorithm will | |
494 | * apply to all newly written blocks; existing blocks will not be affected. | |
495 | */ | |
496 | void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, | |
497 | dmu_tx_t *tx); | |
498 | ||
499 | ||
500 | int dmu_object_remap_indirects(objset_t *os, uint64_t object, uint64_t txg); | |
501 | ||
502 | void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset, | |
503 | void *data, uint8_t etype, uint8_t comp, int uncompressed_size, | |
504 | int compressed_size, int byteorder, dmu_tx_t *tx); | |
505 | ||
506 | /* | |
507 | * Decide how to write a block: checksum, compression, number of copies, etc. | |
508 | */ | |
509 | #define WP_NOFILL 0x1 | |
510 | #define WP_DMU_SYNC 0x2 | |
511 | #define WP_SPILL 0x4 | |
512 | ||
513 | void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, | |
514 | struct zio_prop *zp); | |
515 | ||
516 | /* | |
517 | * The bonus data is accessed more or less like a regular buffer. | |
518 | * You must dmu_bonus_hold() to get the buffer, which will give you a | |
519 | * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus | |
520 | * data. As with any normal buffer, you must call dmu_buf_will_dirty() | |
521 | * before modifying it, and the | |
522 | * object must be held in an assigned transaction before calling | |
523 | * dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus | |
524 | * buffer as well. You must release what you hold with dmu_buf_rele(). | |
525 | * | |
526 | * Returns ENOENT, EIO, or 0. | |
527 | */ | |
528 | int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp); | |
529 | int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp, | |
530 | uint32_t flags); | |
531 | int dmu_bonus_max(void); | |
532 | int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *); | |
533 | int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *); | |
534 | dmu_object_type_t dmu_get_bonustype(dmu_buf_t *); | |
535 | int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *); | |
536 | ||
537 | /* | |
538 | * Special spill buffer support used by "SA" framework | |
539 | */ | |
540 | ||
541 | int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag, | |
542 | dmu_buf_t **dbp); | |
543 | int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, | |
544 | void *tag, dmu_buf_t **dbp); | |
545 | int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp); | |
546 | ||
547 | /* | |
548 | * Obtain the DMU buffer from the specified object which contains the | |
549 | * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so | |
550 | * that it will remain in memory. You must release the hold with | |
551 | * dmu_buf_rele(). You must not access the dmu_buf_t after releasing | |
552 | * what you hold. You must have a hold on any dmu_buf_t* you pass to the DMU. | |
553 | * | |
554 | * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill | |
555 | * on the returned buffer before reading or writing the buffer's | |
556 | * db_data. The comments for those routines describe what particular | |
557 | * operations are valid after calling them. | |
558 | * | |
559 | * The object number must be a valid, allocated object number. | |
560 | */ | |
561 | int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset, | |
562 | void *tag, dmu_buf_t **, int flags); | |
563 | int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset, | |
564 | void *tag, dmu_buf_t **dbp, int flags); | |
565 | ||
566 | /* | |
567 | * Add a reference to a dmu buffer that has already been held via | |
568 | * dmu_buf_hold() in the current context. | |
569 | */ | |
570 | void dmu_buf_add_ref(dmu_buf_t *db, void* tag); | |
571 | ||
572 | /* | |
573 | * Attempt to add a reference to a dmu buffer that is in an unknown state, | |
574 | * using a pointer that may have been invalidated by eviction processing. | |
575 | * The request will succeed if the passed in dbuf still represents the | |
576 | * same os/object/blkid, is ineligible for eviction, and has at least | |
577 | * one hold by a user other than the syncer. | |
578 | */ | |
579 | boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object, | |
580 | uint64_t blkid, void *tag); | |
581 | ||
582 | void dmu_buf_rele(dmu_buf_t *db, void *tag); | |
583 | uint64_t dmu_buf_refcount(dmu_buf_t *db); | |
584 | uint64_t dmu_buf_user_refcount(dmu_buf_t *db); | |
585 | ||
586 | /* | |
587 | * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a | |
588 | * range of an object. A pointer to an array of dmu_buf_t*'s is | |
589 | * returned (in *dbpp). | |
590 | * | |
591 | * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and | |
592 | * frees the array. The hold on the array of buffers MUST be released | |
593 | * with dmu_buf_rele_array. You can NOT release the hold on each buffer | |
594 | * individually with dmu_buf_rele. | |
595 | */ | |
596 | int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset, | |
597 | uint64_t length, boolean_t read, void *tag, | |
598 | int *numbufsp, dmu_buf_t ***dbpp); | |
599 | void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag); | |
600 | ||
601 | typedef void dmu_buf_evict_func_t(void *user_ptr); | |
602 | ||
603 | /* | |
604 | * A DMU buffer user object may be associated with a dbuf for the | |
605 | * duration of its lifetime. This allows the user of a dbuf (client) | |
606 | * to attach private data to a dbuf (e.g. in-core only data such as a | |
607 | * dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified | |
608 | * when that dbuf has been evicted. Clients typically respond to the | |
609 | * eviction notification by freeing their private data, thus ensuring | |
610 | * the same lifetime for both dbuf and private data. | |
611 | * | |
612 | * The mapping from a dmu_buf_user_t to any client private data is the | |
613 | * client's responsibility. All current consumers of the API with private | |
614 | * data embed a dmu_buf_user_t as the first member of the structure for | |
615 | * their private data. This allows conversions between the two types | |
616 | * with a simple cast. Since the DMU buf user API never needs access | |
617 | * to the private data, other strategies can be employed if necessary | |
618 | * or convenient for the client (e.g. using container_of() to do the | |
619 | * conversion for private data that cannot have the dmu_buf_user_t as | |
620 | * its first member). | |
621 | * | |
622 | * Eviction callbacks are executed without the dbuf mutex held or any | |
623 | * other type of mechanism to guarantee that the dbuf is still available. | |
624 | * For this reason, users must assume the dbuf has already been freed | |
625 | * and not reference the dbuf from the callback context. | |
626 | * | |
627 | * Users requesting "immediate eviction" are notified as soon as the dbuf | |
628 | * is only referenced by dirty records (dirties == holds). Otherwise the | |
629 | * notification occurs after eviction processing for the dbuf begins. | |
630 | */ | |
/*
 * Per-dbuf user state; typically embedded as the first member of the
 * client's private data structure (see the discussion above).
 */
typedef struct dmu_buf_user {
	/*
	 * Asynchronous user eviction callback state.
	 */
	taskq_ent_t dbu_tqent;

	/*
	 * This instance's eviction function pointers.
	 *
	 * dbu_evict_func_sync is called synchronously and then
	 * dbu_evict_func_async is executed asynchronously on a taskq.
	 */
	dmu_buf_evict_func_t *dbu_evict_func_sync;
	dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
	/*
	 * Pointer to user's dbuf pointer. NULL for clients that do
	 * not associate a dbuf with their user data.
	 *
	 * The dbuf pointer is cleared upon eviction so as to catch
	 * use-after-evict bugs in clients.
	 */
	dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;
656 | ||
657 | /* | |
658 | * Initialize the given dmu_buf_user_t instance with the eviction function | |
659 | * evict_func, to be called when the user is evicted. | |
660 | * | |
661 | * NOTE: This function should only be called once on a given dmu_buf_user_t. | |
662 | * To allow enforcement of this, dbu must already be zeroed on entry. | |
663 | */ | |
664 | /*ARGSUSED*/ | |
665 | static inline void | |
666 | dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync, | |
667 | dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp) | |
668 | { | |
669 | ASSERT(dbu->dbu_evict_func_sync == NULL); | |
670 | ASSERT(dbu->dbu_evict_func_async == NULL); | |
671 | ||
672 | /* must have at least one evict func */ | |
673 | IMPLY(evict_func_sync == NULL, evict_func_async != NULL); | |
674 | dbu->dbu_evict_func_sync = evict_func_sync; | |
675 | dbu->dbu_evict_func_async = evict_func_async; | |
676 | taskq_init_ent(&dbu->dbu_tqent); | |
677 | #ifdef ZFS_DEBUG | |
678 | dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp; | |
679 | #endif | |
680 | } | |
681 | ||
/*
 * Attach user data to a dbuf and mark it for normal (when the dbuf's
 * data is cleared or its reference count goes to zero) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Attach user data to a dbuf and mark it for immediate (its dirty and
 * reference counts are equal) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Replace the current user of a dbuf.
 *
 * If given the current user of a dbuf, replaces the dbuf's user with
 * "new_user" and returns the user data pointer that was replaced.
 * Otherwise returns the current, and unmodified, dbuf user pointer.
 */
void *dmu_buf_replace_user(dmu_buf_t *db,
    dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);

/*
 * Remove the specified user data for a DMU buffer.
 *
 * Returns the user that was removed on success, or the current user if
 * another user currently owns the buffer.
 */
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Returns the user data (dmu_buf_user_t *) associated with this dbuf.
 */
void *dmu_buf_get_user(dmu_buf_t *db);

/* Return the objset this dbuf belongs to. */
objset_t *dmu_buf_get_objset(dmu_buf_t *db);
/*
 * NOTE(review): enter/exit appear to bracket access to the dbuf's backing
 * dnode; presumably each enter must be paired with an exit -- confirm
 * against the implementation in dbuf.c.
 */
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);

/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);

/*
 * Returns the blkptr associated with this dbuf, or NULL if not set.
 */
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);

/*
 * Indicate that you are going to modify the buffer's data (db_data).
 *
 * The transaction (tx) must be assigned to a txg (ie. you've called
 * dmu_tx_assign()). The buffer's object must be held in the tx
 * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
 */
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
/*
 * Set the encryption parameters (byte order, salt, IV, MAC) for this
 * dbuf's data as part of the given transaction.
 */
void dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx);
745 | ||
/*
 * You must create a transaction, then hold the objects which you will
 * (or might) modify as part of this transaction. Then you must assign
 * the transaction to a transaction group. Once the transaction has
 * been assigned, you can modify buffers which belong to held objects as
 * part of this transaction. You can't modify buffers before the
 * transaction has been assigned; you can't modify buffers which don't
 * belong to objects which this transaction holds; you can't hold
 * objects once the transaction has been assigned. You may hold an
 * object which you are going to free (with dmu_object_free()), but you
 * don't have to.
 *
 * You can abort the transaction before it has been assigned.
 *
 * Note that you may hold buffers (with dmu_buf_hold) at any time,
 * regardless of transaction state.
 */

/* Sentinel object/offset values ("allocate new" / "through end of object"). */
#define	DMU_NEW_OBJECT	(-1ULL)
#define	DMU_OBJECT_END	(-1ULL)

dmu_tx_t *dmu_tx_create(objset_t *os);
/* Declare a write to [off, off+len) of an object within this tx. */
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    int len);
/* Declare a free of [off, off+len) of an object within this tx. */
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    uint64_t len);
void dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object);
/* Declare ZAP operations on the object within this tx. */
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
    const char *name);
/* Declare bonus-buffer / spill-block / system-attribute modifications. */
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
/* Transaction lifecycle; see the block comment above for ordering rules. */
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
789 | ||
/*
 * To register a commit callback, dmu_tx_callback_register() must be called.
 *
 * dcb_data is a pointer to caller private data that is passed on as a
 * callback parameter. The caller is responsible for properly allocating and
 * freeing it.
 *
 * When registering a callback, the transaction must be already created, but
 * it cannot be committed or aborted. It can be assigned to a txg or not.
 *
 * The callback will be called after the transaction has been safely written
 * to stable storage and will also be called if the dmu_tx is aborted.
 * If there is any error which prevents the transaction from being committed to
 * disk, the callback will be called with a value of error != 0.
 *
 * When multiple callbacks are registered to the transaction, the callbacks
 * will be called in reverse order to let Lustre, the only user of commit
 * callback currently, take the fast path of its commit callback handling.
 */
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);

void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    void *dcb_data);
/* Run every callback on cb_list, passing the given error code. */
void dmu_tx_do_callbacks(list_t *cb_list, int error);

/*
 * Free up the data blocks for a defined range of a file. If size is
 * -1, the range from offset to end-of-file is freed.
 */
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx);
/* Like dmu_free_range(), but without a caller-supplied tx. */
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size);
/* Free all data blocks of the given object (no caller-supplied tx). */
int dmu_free_long_object(objset_t *os, uint64_t object);
824 | ||
/*
 * Convenience functions.
 *
 * Canfail routines will return 0 on success, or an errno if there is a
 * nonrecoverable I/O error.
 */
/* Flag values for the dmu_read*() "flags" argument. */
#define	DMU_READ_PREFETCH	0 /* prefetch */
#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
#define	DMU_READ_NO_DECRYPT	2 /* don't decrypt */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx);
#ifdef _KERNEL
#include <linux/blkdev_compat.h>
/* uio-based read/write variants (kernel only). */
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
#endif
/* Request/return/assign raw ARC buffers for writes that avoid a copy. */
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
    struct arc_buf *buf, dmu_tx_t *tx);
void dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
    struct arc_buf *buf, dmu_tx_t *tx);
/* Alias preserving the older dmu_assign_arcbuf() name for callers. */
#define	dmu_assign_arcbuf	dmu_assign_arcbuf_by_dbuf
void dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
    dmu_buf_t *handle, dmu_tx_t *tx);
#ifdef HAVE_UIO_ZEROCOPY
/* xuio (zero-copy uio) management; only built when the platform supports it. */
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
    size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
#endif /* HAVE_UIO_ZEROCOPY */
/* xuio statistics counters. */
void xuio_stat_wbuf_copied(void);
void xuio_stat_wbuf_nocopy(void);
876 | ||
/* Module tunables. */
extern int zfs_prefetch_disable;
extern int zfs_max_recordsize;

/*
 * Asynchronously try to read in the data.
 */
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, enum zio_priority pri);

typedef struct dmu_object_info {
	/* All sizes are in bytes unless otherwise indicated. */
	uint32_t doi_data_block_size;
	uint32_t doi_metadata_block_size;
	dmu_object_type_t doi_type;
	dmu_object_type_t doi_bonus_type;
	uint64_t doi_bonus_size;
	uint8_t doi_indirection;		/* 2 = dnode->indirect->data */
	uint8_t doi_checksum;
	uint8_t doi_compress;
	uint8_t doi_nblkptr;
	uint8_t doi_pad[4];
	uint64_t doi_dnodesize;
	uint64_t doi_physical_blocks_512;	/* data + metadata, 512b blks */
	uint64_t doi_max_offset;
	uint64_t doi_fill_count;		/* number of non-empty blocks */
} dmu_object_info_t;

typedef void (*const arc_byteswap_func_t)(void *buf, size_t size);

/* Per-object-type attributes; dmu_ot[] below is indexed by object type. */
typedef struct dmu_object_type_info {
	dmu_object_byteswap_t ot_byteswap;
	boolean_t ot_metadata;
	boolean_t ot_dbuf_metadata_cache;
	boolean_t ot_encrypt;
	char *ot_name;
} dmu_object_type_info_t;

/* Byteswap function table entry; dmu_ot_byteswap[] below is its table. */
typedef const struct dmu_object_byteswap_info {
	arc_byteswap_func_t ob_func;
	char *ob_name;
} dmu_object_byteswap_info_t;

extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
921 | ||
/*
 * Get information on a DMU object.
 *
 * Return 0 on success or ENOENT if object is not allocated.
 *
 * If doi is NULL, just indicates whether the object exists.
 */
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
void __dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
 * Like dmu_object_info_from_db, but faster still when you only care about
 * the size. This is specifically optimized for zfs_getattr().
 */
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512);

/* Like dmu_object_info_from_db, but reports only the dnode size. */
void dmu_object_dnsize_from_db(dmu_buf_t *db, int *dnsize);

typedef struct dmu_objset_stats {
	uint64_t dds_num_clones;	/* number of clones of this */
	uint64_t dds_creation_txg;
	uint64_t dds_guid;
	dmu_objset_type_t dds_type;
	uint8_t dds_is_snapshot;
	uint8_t dds_inconsistent;
	char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;

/*
 * Get stats on a dataset.
 */
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
958 | ||
/*
 * Add entries to the nvlist for all the objset's properties. See
 * zfs_prop_table[] and zfs(1m) for details on the properties.
 */
void dmu_objset_stats(objset_t *os, struct nvlist *nv);

/*
 * Get the space usage statistics for statvfs().
 *
 * refdbytes is the amount of space "referenced" by this objset.
 * availbytes is the amount of space available to this objset, taking
 * into account quotas & reservations, assuming that no other objsets
 * use the space first. These values correspond to the 'referenced' and
 * 'available' properties, described in the zfs(1m) manpage.
 *
 * usedobjs and availobjs are the number of objects currently allocated,
 * and available.
 */
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);

/*
 * The fsid_guid is a 56-bit ID that can change to avoid collisions.
 * (Contrast with the ds_guid which is a 64-bit ID that will never
 * change, so there is a small probability that it will collide.)
 */
uint64_t dmu_objset_fsid_guid(objset_t *os);

/*
 * Get the [cm]time for an objset's snapshot dir
 */
inode_timespec_t dmu_objset_snap_cmtime(objset_t *os);

int dmu_objset_is_snapshot(objset_t *os);

/* Accessors for an objset's components and properties. */
extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern uint64_t dmu_objset_dnodesize(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
/* Iterators/lookups for snapshots and child datasets of an objset. */
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *val);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
    int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp);

/*
 * Per-objset-type callback registered via dmu_objset_register_type().
 * NOTE(review): judging by its parameters it reports the user/group/project
 * IDs associated with an object's bonus data -- confirm in dmu_objset.c.
 */
typedef int objset_used_cb_t(dmu_object_type_t bonustype,
    void *bonus, uint64_t *userp, uint64_t *groupp, uint64_t *projectp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
    objset_used_cb_t *cb);
/* Opaque per-consumer pointer attached to the objset. */
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);
1018 | ||
/*
 * Return the txg number for the given assigned transaction.
 */
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);

/*
 * Synchronous write.
 * If a parent zio is provided this function initiates a write on the
 * provided buffer as a child of the parent zio.
 * In the absence of a parent zio, the write is completed synchronously.
 * At write completion, blk is filled with the bp of the written block.
 * Note that while the data covered by this function will be on stable
 * storage when the write completes this new data does not become a
 * permanent part of the file until the associated transaction commits.
 */

/*
 * {zfs,zvol,ztest}_get_done() args
 */
typedef struct zgd {
	struct lwb *zgd_lwb;
	struct blkptr *zgd_bp;
	dmu_buf_t *zgd_db;
	struct locked_range *zgd_lr;
	void *zgd_private;
} zgd_t;

/* Completion callback passed to dmu_sync() as "done". */
typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);

/*
 * Find the next hole or data block in file starting at *off
 * Return found offset in *off. Return ESRCH for end of file.
 */
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
    uint64_t *off);

/*
 * Initial setup and final teardown.
 */
extern void dmu_init(void);
extern void dmu_fini(void);

/* Callback invoked for each block visited by dmu_traverse_objset(). */
typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
    uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
    dmu_traverse_cb_t cb, void *arg);

/*
 * NOTE(review): presumably writes the differences between the two named
 * snapshots to vp -- confirm against dmu_diff.c.
 */
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
    struct vnode *vp, offset_t *offp);

/* CRC64 table */
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];
1073 | ||
1074 | #ifdef __cplusplus | |
1075 | } | |
1076 | #endif | |
1077 | ||
1078 | #endif /* _SYS_DMU_H */ |