/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */

#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/mntent.h>
#include <sys/mkdev.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/mode.h>
#include <sys/atomic.h>
#include <vm/pvn.h>
#include "fs/fs_subr.h"
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
#include <sys/zpl.h>
#endif /* _KERNEL */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef DEBUG
#define	ZNODE_STATS
#endif /* DEBUG */

#ifdef ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)	((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)	/* nothing */
#endif /* ZNODE_STATS */
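
/*
 * Example (illustrative only; the counter name is hypothetical): a
 * statistic bumped with ZNODE_STAT_ADD() compiles away entirely when
 * ZNODE_STATS is not defined, so instrumentation of this form is free
 * in non-DEBUG builds:
 *
 *	static uint64_t znode_stat_example_hits;
 *	...
 *	ZNODE_STAT_ADD(znode_stat_example_hits);
 */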

/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL

static kmem_cache_t *znode_cache = NULL;

/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	inode_init_once(ZTOI(zp));
	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_dirlocks = NULL;
	zp->z_acl_cached = NULL;
	zp->z_xattr_cached = NULL;
	zp->z_xattr_parent = NULL;
	zp->z_moved = 0;
	return (0);
}

/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	ASSERT(zp->z_xattr_cached == NULL);
	ASSERT(zp->z_xattr_parent == NULL);
}

void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache. The KMC_SLAB hint is used so that the znodes
	 * are backed by kmalloc() on the Linux slab, which is required for
	 * any wait_on_bit() operations on the embedded inode to work
	 * properly.
	 */
	ASSERT(znode_cache == NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
}

void
zfs_znode_fini(void)
{
	/*
	 * Cleanup zcache
	 */
	if (znode_cache)
		kmem_cache_destroy(znode_cache);
	znode_cache = NULL;
}

int
zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
#ifdef HAVE_SMB_SHARE
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	vnode_t *vp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
	vattr.va_mode = S_IFDIR | 0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	sharezp->z_moved = 0;
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zsb;
	sharezp->z_is_sa = zsb->z_use_sa;

	vp = ZTOV(sharezp);
	vn_reinit(vp);
	vp->v_type = VDIR;

	VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zsb->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	// ZTOV(sharezp)->v_count = 0;
	sa_handle_destroy(sharezp->z_sa_hdl);
	kmem_cache_free(znode_cache, sharezp);

	return (error);
#else
	return (0);
#endif /* HAVE_SMB_SHARE */
}

static void
zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zsb, zp->z_id)));

	mutex_enter(&zp->z_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	if (sa_hdl == NULL) {
		VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	mutex_exit(&zp->z_lock);
}

void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(ZTOZSB(zp), zp->z_id)) ||
	    zp->z_unlinked ||
	    RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

/*
 * Called by new_inode() to allocate a new inode.
 */
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
	znode_t *zp;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	*ip = ZTOI(zp);

	return (0);
}

/*
 * Called in multiple places when an inode should be destroyed.
 */
void
zfs_inode_destroy(struct inode *ip)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ZTOZSB(zp);

	mutex_enter(&zsb->z_znodes_lock);
	if (list_link_active(&zp->z_link_node)) {
		list_remove(&zsb->z_all_znodes, zp);
		zsb->z_nr_znodes--;
	}
	mutex_exit(&zsb->z_znodes_lock);

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		zfs_iput_async(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}

	kmem_cache_free(znode_cache, zp);
}

static void
zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
{
	uint64_t rdev = 0;

	switch (ip->i_mode & S_IFMT) {
	case S_IFREG:
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;

	case S_IFDIR:
		ip->i_op = &zpl_dir_inode_operations;
		ip->i_fop = &zpl_dir_file_operations;
		ITOZ(ip)->z_zn_prefetch = B_TRUE;
		break;

	case S_IFLNK:
		ip->i_op = &zpl_symlink_inode_operations;
		break;

	/*
	 * rdev is only stored in a SA for device files.
	 */
	case S_IFCHR:
	case S_IFBLK:
		sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb), &rdev,
		    sizeof (rdev));
		/*FALLTHROUGH*/
	case S_IFIFO:
	case S_IFSOCK:
		init_special_inode(ip, ip->i_mode, rdev);
		ip->i_op = &zpl_special_inode_operations;
		break;

	default:
		zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
		    (u_longlong_t)ip->i_ino, ip->i_mode);

		/* Assume the inode is a file and attempt to continue */
		ip->i_mode = S_IFREG | 0644;
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;
	}
}

/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct inode *dip)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_parent, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		zp->z_sa_hdl = NULL;
		goto error;
	}

	zp->z_mode = mode;

	/*
	 * xattr znodes hold a reference on their unique parent
	 */
	if (dip && zp->z_pflags & ZFS_XATTR) {
		igrab(dip);
		zp->z_xattr_parent = ITOZ(dip);
	}

	ip->i_ino = obj;
	zfs_inode_update(zp);
	zfs_inode_set_ops(zsb, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block. This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * The one exception is rolling back a mounted file system, but in
	 * this case all the active inodes are unhashed during the rollback.
	 */
	VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	iput(ip);
	return (NULL);
}

void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
	/*
	 * Linux and Solaris have different sets of file attributes, so we
	 * restrict this conversion to the intersection of the two.
	 */

	if (zp->z_pflags & ZFS_IMMUTABLE)
		ip->i_flags |= S_IMMUTABLE;
	else
		ip->i_flags &= ~S_IMMUTABLE;

	if (zp->z_pflags & ZFS_APPENDONLY)
		ip->i_flags |= S_APPEND;
	else
		ip->i_flags &= ~S_APPEND;
}

/*
 * Update the embedded inode given the znode. We should work toward
 * eliminating this function as soon as possible by removing values
 * which are duplicated between the znode and inode. If the generic
 * inode has the correct field it should be used, and the ZFS code
 * updated to access the inode. This can be done incrementally.
 */
void
zfs_inode_update(znode_t *zp)
{
	zfs_sb_t	*zsb;
	struct inode	*ip;
	uint32_t	blksize;
	u_longlong_t	i_blocks;
	uint64_t	atime[2], mtime[2], ctime[2];

	ASSERT(zp != NULL);
	zsb = ZTOZSB(zp);
	ip = ZTOI(zp);

	/* Skip .zfs control nodes which do not exist on disk. */
	if (zfsctl_is_node(ip))
		return;

	sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);

	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);

	spin_lock(&ip->i_lock);
	ip->i_generation = zp->z_gen;
	ip->i_uid = SUID_TO_KUID(zp->z_uid);
	ip->i_gid = SGID_TO_KGID(zp->z_gid);
	set_nlink(ip, zp->z_links);
	ip->i_mode = zp->z_mode;
	zfs_set_inode_flags(zp, ip);
	ip->i_blkbits = SPA_MINBLOCKSHIFT;
	ip->i_blocks = i_blocks;

	ZFS_TIME_DECODE(&ip->i_atime, atime);
	ZFS_TIME_DECODE(&ip->i_mtime, mtime);
	ZFS_TIME_DECODE(&ip->i_ctime, ctime);

	i_size_write(ip, zp->z_size);
	spin_unlock(&ip->i_lock);
}

/*
 * Safely mark an inode dirty. Inodes which are part of a read-only
 * file system or snapshot may not be dirtied.
 */
void
zfs_mark_inode_dirty(struct inode *ip)
{
	zfs_sb_t *zsb = ITOZSB(ip);

	if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os))
		return;

	mark_inode_dirty(ip);
}

static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		acl_ids	- ACL, mode, and fuid information for the new znode
 *
 *	OUT:	zpp	- allocated znode
 *
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t	crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t	mode, size, links, parent, pflags;
	uint64_t	dzp_pflags = 0;
	uint64_t	rdev = 0;
	zfs_sb_t	*zsb = ZTOZSB(dzp);
	dmu_buf_t	*db;
	timestruc_t	now;
	uint64_t	gen, obj;
	int		bonuslen;
	sa_handle_t	*sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t	*sa_attrs;
	int		cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };

	if (zsb->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
	} else {
		obj = 0;
		gethrestime(&now);
		gen = dmu_tx_get_txg(tx);
	}

	obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 */
	/*
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (S_ISDIR(vap->va_mode)) {
		if (zsb->z_replay) {
			VERIFY0(zap_create_claim_norm(zsb->z_os, obj,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx));
		} else {
			obj = zap_create_norm(zsb->z_os,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx);
		}
	} else {
		if (zsb->z_replay) {
			VERIFY0(dmu_object_claim(zsb->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx));
		} else {
			obj = dmu_object_alloc(zsb->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx);
		}
	}

	ZFS_OBJ_HOLD_ENTER(zsb, obj);
	VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zsb->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (S_ISDIR(vap->va_mode)) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
		rdev = vap->va_rdev;

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * Whether any execs are denied will be determined when
	 * zfs_mode_compute() is called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Set up the array of attributes to be replaced/set on the new file.
	 *
	 * The order for DMU_OT_ZNODE is critical since it needs to be
	 * constructed in the old znode_phys_t format. Don't change this
	 * ordering.
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
		    ZTOI(dzp));
		VERIFY(*zpp != NULL);
		VERIFY(dzp != NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
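
/*
 * Usage sketch (illustrative, not part of this file): callers such as
 * zfs_create() in zfs_vnops.c invoke zfs_mknode() from inside an
 * assigned transaction, roughly as follows. The exact tx holds vary by
 * caller; this is an approximation, not a recipe:
 *
 *	VERIFY(0 == zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids));
 *	tx = dmu_tx_create(zsb->z_os);
 *	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
 *	    ZFS_SA_BASE_ATTR_SIZE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
 *	zfs_acl_ids_free(&acl_ids);
 *	dmu_tx_commit(tx);
 */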

/*
 * Update in-core attributes. It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	xoptattr_t *xoap;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		uint64_t times[2];
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
		    &times, sizeof (times), tx);
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_IMMUTABLE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
		ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NOUNLINK);
	}
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
		ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_APPENDONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
		ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NODUMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
		ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OPAQUE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
		    xoap->xoa_av_quarantined, zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		zfs_sa_set_scanstamp(zp, xvap, tx);
		XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_REPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
		ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OFFLINE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SPARSE);
	}
}

int
zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t	*db;
	znode_t		*zp;
	int err;
	sa_handle_t	*hdl;

	*zpp = NULL;

again:
	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find a sa handle that doesn't
		 * know about the znode.
		 */
		ASSERT3P(zp, !=, NULL);

		mutex_enter(&zp->z_lock);
		ASSERT3U(zp->z_id, ==, obj_num);
		if (zp->z_unlinked) {
			err = SET_ERROR(ENOENT);
		} else {
			/*
			 * If igrab() returns NULL the VFS has independently
			 * determined the inode should be evicted and has
			 * called iput_final() to start the eviction process.
			 * The SA handle is still valid but because the VFS
			 * requires that the eviction succeed we must drop
			 * our locks and references to allow the eviction to
			 * complete. The zfs_zget() may then be retried.
			 *
			 * This unlikely case could be optimized by registering
			 * a sops->drop_inode() callback. The callback would
			 * need to detect the active SA hold thereby informing
			 * the VFS that this inode should not be evicted.
			 */
			if (igrab(ZTOI(zp)) == NULL) {
				mutex_exit(&zp->z_lock);
				sa_buf_rele(db, NULL);
				ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
				/* inode might need this to finish evict */
				cond_resched();
				goto again;
			}
			*zpp = zp;
			err = 0;
		}
		mutex_exit(&zp->z_lock);
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	/*
	 * Not found; create a new znode/vnode, but only if the file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress. This is checked for in zfs_znode_alloc().
	 *
	 * If zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, obj_num, NULL, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
	return (err);
}
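
/*
 * Usage sketch (illustrative): zfs_zget() returns a held znode on
 * success, and the caller must release the underlying inode reference
 * with iput() when done:
 *
 *	znode_t *zp;
 *
 *	error = zfs_zget(zsb, obj_num, &zp);
 *	if (error == 0) {
 *		... use zp ...
 *		iput(ZTOI(zp));
 *	}
 */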

int
zfs_rezget(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		zfs_iput_async(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_unlinked = (zp->z_links == 0);
	zp->z_blksz = doi.doi_data_block_size;
	zfs_inode_update(zp);

	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);

	return (0);
}

void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	objset_t *os = zsb->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);

	ZFS_OBJ_HOLD_ENTER(zsb, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY(0 == dmu_object_free(os, acl_obj, tx));
	}
	VERIFY(0 == dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zsb, obj);
}

void
zfs_zinactive(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	uint64_t z_id = zp->z_id;

	ASSERT(zp->z_sa_hdl);

	/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
	 */
	ZFS_OBJ_HOLD_ENTER(zsb, z_id);

	mutex_enter(&zp->z_lock);

	/*
	 * If this was the last reference to a file with no links,
	 * remove the file from the file system.
	 */
	if (zp->z_unlinked) {
		mutex_exit(&zp->z_lock);

		ZFS_OBJ_HOLD_EXIT(zsb, z_id);

		zfs_rmnode(zp);
		return;
	}

	mutex_exit(&zp->z_lock);
	zfs_znode_dmu_fini(zp);

	ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}

static inline int
zfs_compare_timespec(struct timespec *t1, struct timespec *t2)
{
	if (t1->tv_sec < t2->tv_sec)
		return (-1);

	if (t1->tv_sec > t2->tv_sec)
		return (1);

	return (t1->tv_nsec - t2->tv_nsec);
}

/*
 * Determine whether the znode's atime must be updated. The logic mostly
 * duplicates the Linux kernel's relatime_need_update() functionality.
 * This function is only called if the underlying filesystem actually has
 * atime updates enabled.
 */
static inline boolean_t
zfs_atime_need_update(znode_t *zp, timestruc_t *now)
{
	if (!ZTOZSB(zp)->z_relatime)
		return (B_TRUE);

	/*
	 * In relatime mode, only update the atime if the previous atime
	 * is earlier than either the ctime or mtime or if at least a day
	 * has passed since the last update of atime.
	 */
	if (zfs_compare_timespec(&ZTOI(zp)->i_mtime, &ZTOI(zp)->i_atime) >= 0)
		return (B_TRUE);

	if (zfs_compare_timespec(&ZTOI(zp)->i_ctime, &ZTOI(zp)->i_atime) >= 0)
		return (B_TRUE);

	if ((long)now->tv_sec - ZTOI(zp)->i_atime.tv_sec >= 24*60*60)
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Prepare to update znode time stamps.
 *
 *	IN:	zp	- znode requiring timestamp update
 *		flag	- ATTR_MTIME, ATTR_CTIME, ATTR_ATIME flags
 *		have_tx	- true if the caller is creating a new txg
 *
 *	OUT:	zp	- new atime (via underlying inode's i_atime)
 *		mtime	- new mtime
 *		ctime	- new ctime
 *
 * NOTE: The arguments are somewhat redundant. The following condition
 * is always true:
 *
 *	have_tx == !(flag & ATTR_ATIME)
 */
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2], boolean_t have_tx)
{
	timestruc_t	now;

	ASSERT(have_tx == !(flag & ATTR_ATIME));
	gethrestime(&now);

	/*
	 * NOTE: The following test intentionally does not update
	 * z_atime_dirty in the case where an ATIME update has been
	 * requested but for which the update is omitted due to relatime
	 * logic. The rationale being that if the flag was set somewhere
	 * else, we should leave it alone here.
	 */
	if (flag & ATTR_ATIME) {
		if (zfs_atime_need_update(zp, &now)) {
			ZFS_TIME_ENCODE(&now, zp->z_atime);
			ZTOI(zp)->i_atime.tv_sec = zp->z_atime[0];
			ZTOI(zp)->i_atime.tv_nsec = zp->z_atime[1];
			zp->z_atime_dirty = 1;
		}
	} else {
		zp->z_atime_dirty = 0;
		zp->z_seq++;
	}

	if (flag & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&now, mtime);
		if (ZTOZSB(zp)->z_use_fuids) {
			zp->z_pflags |= (ZFS_ARCHIVE |
			    ZFS_AV_MODIFIED);
		}
	}

	if (flag & ATTR_CTIME) {
		ZFS_TIME_ENCODE(&now, ctime);
		if (ZTOZSB(zp)->z_use_fuids)
			zp->z_pflags |= ZFS_ARCHIVE;
	}
}
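
/*
 * Usage sketch (illustrative, mirroring the pattern in zfs_freesp()
 * below): callers typically pair this with an SA bulk update inside an
 * assigned transaction so the new mtime/ctime reach disk:
 *
 *	uint64_t mtime[2], ctime[2];
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
 *	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
 *	(void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 */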

/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file whose block size is to grow.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int		error;
	u_longlong_t	dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow. If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
	    size, 0, tx);

	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
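
/*
 * Worked example (illustrative): a file with z_blksz = 4096 and
 * z_size = 3000 still fits in a single block, so a request for
 * size = 16384 can succeed (subject to rounding by
 * dmu_object_set_blocksize(), which is why the actual blocksize is read
 * back above). Once z_size exceeds z_blksz the file spans multiple
 * blocks and the early return applies, because the blocksize of a
 * multi-block file cannot change.
 */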

/*
 * Increase the file length
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	uint64_t newblksz;
	int error;

	/*
	 * We will change z_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property. Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
	    &zp->z_size, sizeof (zp->z_size), tx));

	zfs_range_unlock(rl);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * zfs_zero_partial_page - Modeled after update_pages() but
 * with different arguments and semantics for use by zfs_freesp().
 *
 * Zeroes a piece of a single page cache entry for zp at offset
 * start and length len.
 *
 * Caller must acquire a range lock on the file for the region
 * being zeroed in order that the ARC and page cache stay in sync.
 */
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
	struct address_space *mp = ZTOI(zp)->i_mapping;
	struct page *pp;
	int64_t	off;
	void *pb;

	ASSERT((start & PAGE_CACHE_MASK) ==
	    ((start + len - 1) & PAGE_CACHE_MASK));

	off = start & (PAGE_CACHE_SIZE - 1);
	start &= PAGE_CACHE_MASK;

	pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
	if (pp) {
		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		pb = kmap(pp);
		bzero(pb + off, len);
		kunmap(pp);

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);
		SetPageUptodate(pp);
		ClearPageError(pp);
		unlock_page(pp);
		page_cache_release(pp);
	}
}
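
/*
 * Worked example (assuming 4K pages, i.e. PAGE_CACHE_SIZE = 4096): a
 * call with start = 5000 and len = 100 computes off = 5000 & 4095 = 904
 * and rounds start down to 4096, so page index 1 is locked and bytes
 * [904, 1003] within it are zeroed. The ASSERT above guarantees the
 * range never crosses a page boundary.
 */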

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	rl_t *rl;
	int error;

	/*
	 * Lock the range being freed.
	 */
	rl = zfs_range_lock(zp, off, len, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (off >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);

	/*
	 * Zero partial page cache entries. This must be done under a
	 * range lock in order to keep the ARC and page cache in sync.
	 */
	if (zp->z_is_mapped) {
		loff_t first_page, last_page, page_len;
		loff_t first_page_offset, last_page_offset;

		/* first possible full page in hole */
		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		/* last page of hole */
		last_page = (off + len) >> PAGE_CACHE_SHIFT;

		/* offset of first_page */
		first_page_offset = first_page << PAGE_CACHE_SHIFT;
		/* offset of last_page */
		last_page_offset = last_page << PAGE_CACHE_SHIFT;

		/* truncate whole pages */
		if (last_page_offset > first_page_offset) {
			truncate_inode_pages_range(ZTOI(zp)->i_mapping,
			    first_page_offset, last_page_offset - 1);
		}

		/* truncate sub-page ranges */
		if (first_page > last_page) {
			/* entire punched area within a single page */
			zfs_zero_partial_page(zp, off, len);
		} else {
			/* beginning of punched area at the end of a page */
			page_len = first_page_offset - off;
			if (page_len > 0)
				zfs_zero_partial_page(zp, off, page_len);

			/* end of punched area at the beginning of a page */
			page_len = off + len - last_page_offset;
			if (page_len > 0)
				zfs_zero_partial_page(zp, last_page_offset,
				    page_len);
		}
	}
	zfs_range_unlock(rl);

	return (error);
}
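
/*
 * Worked example for the mapped-file case above (assuming 4K pages): a
 * hole punched at off = 1000, len = 10000 yields first_page = 1,
 * last_page = 2, first_page_offset = 4096, and last_page_offset = 8192.
 * The fully-covered page [4096, 8191] is truncated from the page cache,
 * while the partial ranges [1000, 4095] and [8192, 10999] are zeroed in
 * place by zfs_zero_partial_page().
 */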

/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change z_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	zfs_range_unlock(rl);

	return (0);
}

/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- length of range (0 => to EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	dmu_tx_t *tx;
	zfs_sb_t *zsb = ZTOZSB(zp);
	zilog_t *zilog = zsb->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error =  zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		goto out;
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		goto out;
log:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);

	zfs_inode_update(zp);
	error = 0;

out:
	/*
	 * Truncate the page cache - for file truncate operations, use
	 * the purpose-built API for truncations. For punching operations,
	 * the truncation is handled under a range lock in zfs_free_range.
	 */
	if (len == 0)
		truncate_setsize(ZTOI(zp), off);
	return (error);
}

void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	struct super_block *sb;
	zfs_sb_t	*zsb;
	uint64_t	moid, obj, sa_obj, version;
	uint64_t	sense = ZFS_CASE_SENSITIVE;
	uint64_t	norm = 0;
	nvpair_t	*elem;
	int		error;
	int		i;
	znode_t		*rootzp = NULL;
	vattr_t		vattr;
	znode_t		*zp;
	zfs_acl_ids_t	acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		char *name;

		ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
		VERIFY(nvpair_value_uint64(elem, &val) == 0);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT(error == 0);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT(version != 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);

	/*
	 * Create zap object used for SA attribute registration
	 */

	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT(error == 0);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT(error == 0);

	/*
	 * Create root znode. Create minimal znode/inode/zsb/sb
	 * to allow zfs_mknode to work.
	 */
	vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	rootzp->z_moved = 0;
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
	zsb->z_os = os;
	zsb->z_parent = zsb;
	zsb->z_version = version;
	zsb->z_use_fuids = USE_FUIDS(version, os);
	zsb->z_use_sa = USE_SA(version, os);
	zsb->z_norm = norm;

	sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
	sb->s_fs_info = zsb;

	ZTOI(rootzp)->i_sb = sb;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);

	ASSERT(error == 0);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	zsb->z_hold_mtx = vmem_zalloc(sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ,
	    KM_SLEEP);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);

	atomic_set(&ZTOI(rootzp)->i_count, 0);
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	/*
	 * Create shares directory
	 */
	error = zfs_create_share_dir(zsb, tx);
	ASSERT(error == 0);

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);

	vmem_free(zsb->z_hold_mtx, sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	kmem_free(sb, sizeof (struct super_block));
	kmem_free(zsb, sizeof (zfs_sb_t));
}
#endif /* _KERNEL */

static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}

static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
		sa_buf_rele(*db, tag);
		return (SET_ERROR(ENOTSUP));
	}

	error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
	if (error != 0) {
		sa_buf_rele(*db, tag);
		return (error);
	}

	return (0);
}

void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
	sa_handle_destroy(hdl);
	sa_buf_rele(db, tag);
}

/*
 * Given an object number, return its parent object number and whether
 * or not the object is an extended attribute directory.
 */
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
    uint64_t *pobjp, int *is_xattrdir)
{
	uint64_t parent;
	uint64_t pflags;
	uint64_t mode;
	uint64_t parent_mode;
	sa_bulk_attr_t bulk[3];
	sa_handle_t *sa_hdl;
	dmu_buf_t *sa_db;
	int count = 0;
	int error;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
	    &parent, sizeof (parent));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
	    &pflags, sizeof (pflags));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &mode, sizeof (mode));

	if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
		return (error);

	/*
	 * When a link is removed its parent pointer is not changed and will
	 * be invalid. There are two cases where a link is removed but the
	 * file stays around: when it goes to the delete queue and when there
	 * are additional links.
	 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (EINVAL);

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	for (;;) {
		uint64_t pobj = 0;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir = 0;

		if (prevdb)
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		bcopy(component, path, complen);
		obj = pobj;

		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}
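
/*
 * Usage sketch (illustrative): consumers such as zdb resolve an object
 * number to a path by passing a scratch buffer; the path is built
 * backwards from the end of the buffer and moved into place on success:
 *
 *	char path[MAXPATHLEN * 2];
 *
 *	if (zfs_obj_to_path(os, obj, path, sizeof (path)) == 0)
 *		(void) printf("%s\n", path);
 */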

int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
#endif