/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */

#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/mntent.h>
#include <sys/mkdev.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/mode.h>
#include <sys/atomic.h>
#include <vm/pvn.h>
#include "fs/fs_subr.h"
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
#include <sys/zpl.h>
#endif /* _KERNEL */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef DEBUG
#define	ZNODE_STATS
#endif /* DEBUG */

#ifdef ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)	((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)	/* nothing */
#endif /* ZNODE_STATS */

/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL

static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;

/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	inode_init_once(ZTOI(zp));
	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_dirlocks = NULL;
	zp->z_acl_cached = NULL;
	zp->z_xattr_cached = NULL;
	zp->z_xattr_parent = NULL;
	zp->z_moved = 0;
	return (0);
}

/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	ASSERT(zp->z_xattr_cached == NULL);
	ASSERT(zp->z_xattr_parent == NULL);
}

static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_hold_t *zh = buf;

	mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
	refcount_create(&zh->zh_refcount);
	zh->zh_obj = ZFS_NO_OBJECT;

	return (0);
}

static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
	znode_hold_t *zh = buf;

	mutex_destroy(&zh->zh_lock);
	refcount_destroy(&zh->zh_refcount);
}

void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache. The KMC_SLAB hint is used so the cache is
	 * backed by kmalloc() on the Linux slab, which is required for
	 * any wait_on_bit() operations on the related inode to operate
	 * properly.
	 */
	ASSERT(znode_cache == NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);

	ASSERT(znode_hold_cache == NULL);
	znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
	    sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
	    zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}

void
zfs_znode_fini(void)
{
	/*
	 * Cleanup zcache
	 */
	if (znode_cache)
		kmem_cache_destroy(znode_cache);
	znode_cache = NULL;

	if (znode_hold_cache)
		kmem_cache_destroy(znode_hold_cache);
	znode_hold_cache = NULL;
}

/*
 * The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
 * serialize access to a znode and its SA buffer while the object is being
 * created or destroyed. This kind of locking would normally reside in the
 * znode itself but in this case that's impossible because the znode and SA
 * buffer may not yet exist. Therefore the locking is handled externally
 * with an array of mutexes and AVL trees which contain per-object locks.
 *
 * In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
 * into the correct AVL tree and finally the per-object lock is held. In
 * zfs_znode_hold_exit() the process is reversed. The per-object lock is
 * released, removed from the AVL tree and destroyed if there are no waiters.
 *
 * This scheme has two important properties:
 *
 * 1) No memory allocations are performed while holding one of the
 *    z_hold_locks. This ensures evict(), which can be called from direct
 *    memory reclaim, will never block waiting on a z_hold_locks which just
 *    happens to have hashed to the same index.
 *
 * 2) All locks used to serialize access to an object are per-object and
 *    never shared. This minimizes lock contention without creating a large
 *    number of dedicated locks.
 *
 * On the downside it does require znode_hold_t structures to be frequently
 * allocated and freed. However, because these are backed by a kmem cache
 * and are very short-lived, this cost is minimal.
 */
int
zfs_znode_hold_compare(const void *a, const void *b)
{
	const znode_hold_t *zh_a = a;
	const znode_hold_t *zh_b = b;

	if (zh_a->zh_obj < zh_b->zh_obj)
		return (-1);
	else if (zh_a->zh_obj > zh_b->zh_obj)
		return (1);
	else
		return (0);
}

boolean_t
zfs_znode_held(zfs_sb_t *zsb, uint64_t obj)
{
	znode_hold_t *zh, search;
	int i = ZFS_OBJ_HASH(zsb, obj);
	boolean_t held;

	search.zh_obj = obj;

	mutex_enter(&zsb->z_hold_locks[i]);
	zh = avl_find(&zsb->z_hold_trees[i], &search, NULL);
	held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
	mutex_exit(&zsb->z_hold_locks[i]);

	return (held);
}

static znode_hold_t *
zfs_znode_hold_enter(zfs_sb_t *zsb, uint64_t obj)
{
	znode_hold_t *zh, *zh_new, search;
	int i = ZFS_OBJ_HASH(zsb, obj);
	boolean_t found = B_FALSE;

	zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
	zh_new->zh_obj = obj;
	search.zh_obj = obj;

	mutex_enter(&zsb->z_hold_locks[i]);
	zh = avl_find(&zsb->z_hold_trees[i], &search, NULL);
	if (likely(zh == NULL)) {
		zh = zh_new;
		avl_add(&zsb->z_hold_trees[i], zh);
	} else {
		ASSERT3U(zh->zh_obj, ==, obj);
		found = B_TRUE;
	}
	refcount_add(&zh->zh_refcount, NULL);
	mutex_exit(&zsb->z_hold_locks[i]);

	if (found == B_TRUE)
		kmem_cache_free(znode_hold_cache, zh_new);

	ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
	ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
	mutex_enter(&zh->zh_lock);

	return (zh);
}

static void
zfs_znode_hold_exit(zfs_sb_t *zsb, znode_hold_t *zh)
{
	int i = ZFS_OBJ_HASH(zsb, zh->zh_obj);
	boolean_t remove = B_FALSE;

	ASSERT(zfs_znode_held(zsb, zh->zh_obj));
	ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
	mutex_exit(&zh->zh_lock);

	mutex_enter(&zsb->z_hold_locks[i]);
	if (refcount_remove(&zh->zh_refcount, NULL) == 0) {
		avl_remove(&zsb->z_hold_trees[i], zh);
		remove = B_TRUE;
	}
	mutex_exit(&zsb->z_hold_locks[i]);

	if (remove == B_TRUE)
		kmem_cache_free(znode_hold_cache, zh);
}
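
/*
 * Illustrative sketch (comment only, not compiled): the per-object
 * serialization pattern the functions above provide, as used throughout
 * this file. The zsb and obj names here stand in for whatever the caller
 * has in hand.
 *
 *	znode_hold_t *zh;
 *
 *	zh = zfs_znode_hold_enter(zsb, obj);	// take the per-object lock
 *	... create, destroy, or re-initialize the znode / SA buffer ...
 *	zfs_znode_hold_exit(zsb, zh);		// drop it; freed if unused
 */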

int
zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
#ifdef HAVE_SMB_SHARE
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	vnode_t *vp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
	vattr.va_mode = S_IFDIR | 0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	sharezp->z_moved = 0;
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	vp = ZTOV(sharezp);
	vn_reinit(vp);
	vp->v_type = VDIR;

	VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	// ZTOV(sharezp)->v_count = 0;
	sa_handle_destroy(sharezp->z_sa_hdl);
	kmem_cache_free(znode_cache, sharezp);

	return (error);
#else
	return (0);
#endif /* HAVE_SMB_SHARE */
}

static void
zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(zfs_znode_held(zsb, zp->z_id));

	mutex_enter(&zp->z_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	if (sa_hdl == NULL) {
		VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	mutex_exit(&zp->z_lock);
}

void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) || zp->z_unlinked ||
	    RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

/*
 * Called by new_inode() to allocate a new inode.
 */
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
	znode_t *zp;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	*ip = ZTOI(zp);

	return (0);
}

/*
 * Called in multiple places when an inode should be destroyed.
 */
void
zfs_inode_destroy(struct inode *ip)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ZTOZSB(zp);

	mutex_enter(&zsb->z_znodes_lock);
	if (list_link_active(&zp->z_link_node)) {
		list_remove(&zsb->z_all_znodes, zp);
		zsb->z_nr_znodes--;
	}
	mutex_exit(&zsb->z_znodes_lock);

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		zfs_iput_async(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}

	kmem_cache_free(znode_cache, zp);
}

static void
zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
{
	uint64_t rdev = 0;

	switch (ip->i_mode & S_IFMT) {
	case S_IFREG:
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;

	case S_IFDIR:
		ip->i_op = &zpl_dir_inode_operations;
		ip->i_fop = &zpl_dir_file_operations;
		ITOZ(ip)->z_zn_prefetch = B_TRUE;
		break;

	case S_IFLNK:
		ip->i_op = &zpl_symlink_inode_operations;
		break;

	/*
	 * rdev is only stored in the SA for device files.
	 */
	case S_IFCHR:
	case S_IFBLK:
		sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb), &rdev,
		    sizeof (rdev));
		/*FALLTHROUGH*/
	case S_IFIFO:
	case S_IFSOCK:
		init_special_inode(ip, ip->i_mode, rdev);
		ip->i_op = &zpl_special_inode_operations;
		break;

	default:
		zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
		    (u_longlong_t)ip->i_ino, ip->i_mode);

		/* Assume the inode is a file and attempt to continue */
		ip->i_mode = S_IFREG | 0644;
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;
	}
}

void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
	/*
	 * Linux and Solaris have different sets of file attributes, so we
	 * restrict this conversion to the intersection of the two.
	 */

	if (zp->z_pflags & ZFS_IMMUTABLE)
		ip->i_flags |= S_IMMUTABLE;
	else
		ip->i_flags &= ~S_IMMUTABLE;

	if (zp->z_pflags & ZFS_APPENDONLY)
		ip->i_flags |= S_APPEND;
	else
		ip->i_flags &= ~S_APPEND;
}

/*
 * Update the embedded inode given the znode. We should work toward
 * eliminating this function as soon as possible by removing values
 * which are duplicated between the znode and inode. If the generic
 * inode has the correct field it should be used, and the ZFS code
 * updated to access the inode. This can be done incrementally.
 */
static void
zfs_inode_update_impl(znode_t *zp, boolean_t new)
{
	zfs_sb_t *zsb;
	struct inode *ip;
	uint32_t blksize;
	u_longlong_t i_blocks;
	uint64_t atime[2], mtime[2], ctime[2];

	ASSERT(zp != NULL);
	zsb = ZTOZSB(zp);
	ip = ZTOI(zp);

	/* Skip .zfs control nodes which do not exist on disk. */
	if (zfsctl_is_node(ip))
		return;

	sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);

	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);

	spin_lock(&ip->i_lock);
	ip->i_uid = SUID_TO_KUID(zp->z_uid);
	ip->i_gid = SGID_TO_KGID(zp->z_gid);
	set_nlink(ip, zp->z_links);
	ip->i_mode = zp->z_mode;
	zfs_set_inode_flags(zp, ip);
	ip->i_blkbits = SPA_MINBLOCKSHIFT;
	ip->i_blocks = i_blocks;

	/*
	 * Only read the atime from the SA for a newly created inode (or on
	 * rezget); otherwise i_atime might be dirty.
	 */
	if (new)
		ZFS_TIME_DECODE(&ip->i_atime, atime);
	ZFS_TIME_DECODE(&ip->i_mtime, mtime);
	ZFS_TIME_DECODE(&ip->i_ctime, ctime);

	i_size_write(ip, zp->z_size);
	spin_unlock(&ip->i_lock);
}

static void
zfs_inode_update_new(znode_t *zp)
{
	zfs_inode_update_impl(zp, B_TRUE);
}

void
zfs_inode_update(znode_t *zp)
{
	zfs_inode_update_impl(zp, B_FALSE);
}

/*
 * Construct a znode+inode and initialize.
 *
 * This does not call dmu_set_user(); that is left to the caller,
 * in case you don't want to return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct inode *dip)
{
	znode_t *zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[8];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_parent, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &ip->i_generation, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 ||
	    ip->i_generation == 0) {

		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		zp->z_sa_hdl = NULL;
		goto error;
	}

	zp->z_mode = mode;

	/*
	 * xattr znodes hold a reference on their unique parent
	 */
	if (dip && zp->z_pflags & ZFS_XATTR) {
		igrab(dip);
		zp->z_xattr_parent = ITOZ(dip);
	}

	ip->i_ino = obj;
	zfs_inode_update_new(zp);
	zfs_inode_set_ops(zsb, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block. This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * The one exception is rolling back a mounted file system, but in
	 * this case all the active inodes are unhashed during the rollback.
	 */
	VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	iput(ip);
	return (NULL);
}

/*
 * Safely mark an inode dirty. Inodes which are part of a read-only
 * file system or snapshot may not be dirtied.
 */
void
zfs_mark_inode_dirty(struct inode *ip)
{
	zfs_sb_t *zsb = ITOZSB(ip);

	if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os))
		return;

	mark_inode_dirty(ip);
}

static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 *
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t mode, size, links, parent, pflags;
	uint64_t dzp_pflags = 0;
	uint64_t rdev = 0;
	zfs_sb_t *zsb = ZTOZSB(dzp);
	dmu_buf_t *db;
	timestruc_t now;
	uint64_t gen, obj;
	int bonuslen;
	sa_handle_t *sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t *sa_attrs;
	int cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };
	znode_hold_t *zh;

	if (zsb->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
	} else {
		obj = 0;
		gethrestime(&now);
		gen = dmu_tx_get_txg(tx);
	}

	obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 */
	/*
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (S_ISDIR(vap->va_mode)) {
		if (zsb->z_replay) {
			VERIFY0(zap_create_claim_norm(zsb->z_os, obj,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx));
		} else {
			obj = zap_create_norm(zsb->z_os,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx);
		}
	} else {
		if (zsb->z_replay) {
			VERIFY0(dmu_object_claim(zsb->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx));
		} else {
			obj = dmu_object_alloc(zsb->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx);
		}
	}

	zh = zfs_znode_hold_enter(zsb, obj);
	VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zsb->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (S_ISDIR(vap->va_mode)) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
		rdev = vap->va_rdev;

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * No execs denied will be determined when zfs_mode_compute() is
	 * called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Set up the array of attributes to be replaced/set on the new file.
	 *
	 * The order for DMU_OT_ZNODE is critical since it needs to be
	 * constructed in the old znode_phys_t format. Don't change this
	 * ordering.
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
		    ZTOI(dzp));
		VERIFY(*zpp != NULL);
		VERIFY(dzp != NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	zfs_znode_hold_exit(zsb, zh);
}

/*
 * Update in-core attributes. It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	xoptattr_t *xoap;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		uint64_t times[2];
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
		    &times, sizeof (times), tx);
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_IMMUTABLE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
		ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NOUNLINK);
	}
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
		ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_APPENDONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
		ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NODUMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
		ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OPAQUE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
		    xoap->xoa_av_quarantined, zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		zfs_sa_set_scanstamp(zp, xvap, tx);
		XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_REPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
		ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OFFLINE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SPARSE);
	}
}
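
/*
 * Illustrative sketch (comment only, not compiled): how a caller might
 * request one of the optional attributes handled above, assuming the
 * standard xva_init()/xva_getxoptattr()/XVA_SET_REQ() helpers and an
 * already-assigned transaction tx.
 *
 *	xvattr_t xva;
 *	xoptattr_t *xoap;
 *
 *	xva_init(&xva);
 *	xoap = xva_getxoptattr(&xva);
 *	xoap->xoa_readonly = B_TRUE;
 *	XVA_SET_REQ(&xva, XAT_READONLY);
 *	zfs_xvattr_set(zp, &xva, tx);	// sets ZFS_READONLY in z_pflags
 */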

int
zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t *db;
	znode_t *zp;
	znode_hold_t *zh;
	int err;
	sa_handle_t *hdl;

	*zpp = NULL;

again:
	zh = zfs_znode_hold_enter(zsb, obj_num);

	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		zfs_znode_hold_exit(zsb, zh);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		zfs_znode_hold_exit(zsb, zh);
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find a sa handle that doesn't
		 * know about the znode.
		 */
		ASSERT3P(zp, !=, NULL);

		mutex_enter(&zp->z_lock);
		ASSERT3U(zp->z_id, ==, obj_num);
		if (zp->z_unlinked) {
			err = SET_ERROR(ENOENT);
		} else {
			/*
			 * If igrab() returns NULL the VFS has independently
			 * determined the inode should be evicted and has
			 * called iput_final() to start the eviction process.
			 * The SA handle is still valid but because the VFS
			 * requires that the eviction succeed we must drop
			 * our locks and references to allow the eviction to
			 * complete. The zfs_zget() may then be retried.
			 *
			 * This unlikely case could be optimized by registering
			 * a sops->drop_inode() callback. The callback would
			 * need to detect the active SA hold thereby informing
			 * the VFS that this inode should not be evicted.
			 */
			if (igrab(ZTOI(zp)) == NULL) {
				mutex_exit(&zp->z_lock);
				sa_buf_rele(db, NULL);
				zfs_znode_hold_exit(zsb, zh);
				/* inode might need this to finish evict */
				cond_resched();
				goto again;
			}
			*zpp = zp;
			err = 0;
		}
		mutex_exit(&zp->z_lock);
		sa_buf_rele(db, NULL);
		zfs_znode_hold_exit(zsb, zh);
		return (err);
	}

	/*
	 * Not found; create a new znode/vnode, but only if the file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress. This is checked for in zfs_znode_alloc().
	 *
	 * If zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, obj_num, NULL, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	zfs_znode_hold_exit(zsb, zh);
	return (err);
}
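
/*
 * Illustrative sketch (comment only, not compiled): the usual zfs_zget()
 * caller pattern. On success the znode comes back with an inode reference
 * taken via igrab(), so the caller must balance it with iput().
 *
 *	znode_t *zp;
 *	int error;
 *
 *	error = zfs_zget(zsb, obj_num, &zp);
 *	if (error == 0) {
 *		... use zp ...
 *		iput(ZTOI(zp));		// drop the reference from zfs_zget()
 *	}
 */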

int
zfs_rezget(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	sa_bulk_attr_t bulk[7];
	int err;
	int count = 0;
	uint64_t gen;
	znode_hold_t *zh;

	zh = zfs_znode_hold_enter(zsb, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		zfs_iput_async(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		zfs_znode_hold_exit(zsb, zh);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		zfs_znode_hold_exit(zsb, zh);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		zfs_znode_hold_exit(zsb, zh);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != ZTOI(zp)->i_generation) {
		zfs_znode_dmu_fini(zp);
		zfs_znode_hold_exit(zsb, zh);
		return (SET_ERROR(EIO));
	}

	zp->z_unlinked = (zp->z_links == 0);
	zp->z_blksz = doi.doi_data_block_size;
	zp->z_atime_dirty = 0;
	zfs_inode_update_new(zp);

	zfs_znode_hold_exit(zsb, zh);

	return (0);
}

void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	objset_t *os = zsb->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);
	znode_hold_t *zh;

	zh = zfs_znode_hold_enter(zsb, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY(0 == dmu_object_free(os, acl_obj, tx));
	}
	VERIFY(0 == dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	zfs_znode_hold_exit(zsb, zh);
}

void
zfs_zinactive(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	uint64_t z_id = zp->z_id;
	znode_hold_t *zh;

	ASSERT(zp->z_sa_hdl);

	/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
	 */
	zh = zfs_znode_hold_enter(zsb, z_id);

	mutex_enter(&zp->z_lock);

	/*
	 * If this was the last reference to a file with no links,
	 * remove the file from the file system.
	 */
	if (zp->z_unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_znode_hold_exit(zsb, zh);
		zfs_rmnode(zp);
		return;
	}

	mutex_exit(&zp->z_lock);
	zfs_znode_dmu_fini(zp);

	zfs_znode_hold_exit(zsb, zh);
}

static inline int
zfs_compare_timespec(struct timespec *t1, struct timespec *t2)
{
	if (t1->tv_sec < t2->tv_sec)
		return (-1);

	if (t1->tv_sec > t2->tv_sec)
		return (1);

	return (t1->tv_nsec - t2->tv_nsec);
}

/*
 * Prepare to update znode time stamps.
 *
 *	IN:	zp	- znode requiring timestamp update
 *		flag	- ATTR_MTIME, ATTR_CTIME flags
 *
 *	OUT:	zp	- z_seq
 *		mtime	- new mtime
 *		ctime	- new ctime
 *
 * Note: We don't update atime here, because we rely on the Linux VFS to do
 * atime updating.
 */
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2])
{
	timestruc_t now;

	gethrestime(&now);

	zp->z_seq++;

	if (flag & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&now, mtime);
		if (ZTOZSB(zp)->z_use_fuids) {
			zp->z_pflags |= (ZFS_ARCHIVE |
			    ZFS_AV_MODIFIED);
		}
	}

	if (flag & ATTR_CTIME) {
		ZFS_TIME_ENCODE(&now, ctime);
		if (ZTOZSB(zp)->z_use_fuids)
			zp->z_pflags |= ZFS_ARCHIVE;
	}
}
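
/*
 * Illustrative sketch (comment only, not compiled): the pattern used by
 * callers such as zfs_freesp() below. zfs_tstamp_update_setup() only fills
 * in the encoded timestamps; the caller pushes them out with an SA bulk
 * update inside an assigned transaction.
 *
 *	uint64_t mtime[2], ctime[2];
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
 *	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
 *	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 */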

/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file whose block size is to be grown.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int error;
	u_longlong_t dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow. If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
	    size, 0, tx);

	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}

/*
 * Increase the file length.
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property. Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
	    &zp->z_size, sizeof (zp->z_size), tx));

	zfs_range_unlock(rl);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * zfs_zero_partial_page - Modeled after update_pages() but
 * with different arguments and semantics for use by zfs_freesp().
 *
 * Zeroes a piece of a single page cache entry for zp at offset
 * start and length len.
 *
 * Caller must acquire a range lock on the file for the region
 * being zeroed in order that the ARC and page cache stay in sync.
 */
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
	struct address_space *mp = ZTOI(zp)->i_mapping;
	struct page *pp;
	int64_t off;
	void *pb;

	ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));

	off = start & (PAGE_SIZE - 1);
	start &= PAGE_MASK;

	pp = find_lock_page(mp, start >> PAGE_SHIFT);
	if (pp) {
		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		pb = kmap(pp);
		bzero(pb + off, len);
		kunmap(pp);

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);
		SetPageUptodate(pp);
		ClearPageError(pp);
		unlock_page(pp);
		put_page(pp);
	}
}

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	rl_t *rl;
	int error;

	/*
	 * Lock the range being freed.
	 */
	rl = zfs_range_lock(zp, off, len, RL_WRITER);

	/*
	 * Nothing to do if the range being freed starts beyond end-of-file.
	 */
	if (off >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);

	/*
	 * Zero partial page cache entries. This must be done under a
	 * range lock in order to keep the ARC and page cache in sync.
	 */
	if (zp->z_is_mapped) {
		loff_t first_page, last_page, page_len;
		loff_t first_page_offset, last_page_offset;

		/* first possible full page in hole */
		first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		/* last page of hole */
		last_page = (off + len) >> PAGE_SHIFT;

		/* offset of first_page */
		first_page_offset = first_page << PAGE_SHIFT;
		/* offset of last_page */
		last_page_offset = last_page << PAGE_SHIFT;

		/* truncate whole pages */
		if (last_page_offset > first_page_offset) {
			truncate_inode_pages_range(ZTOI(zp)->i_mapping,
			    first_page_offset, last_page_offset - 1);
		}

		/* truncate sub-page ranges */
		if (first_page > last_page) {
			/* entire punched area within a single page */
			zfs_zero_partial_page(zp, off, len);
		} else {
			/* beginning of punched area at the end of a page */
			page_len = first_page_offset - off;
			if (page_len > 0)
				zfs_zero_partial_page(zp, off, page_len);

			/* end of punched area at the beginning of a page */
			page_len = off + len - last_page_offset;
			if (page_len > 0)
				zfs_zero_partial_page(zp, last_page_offset,
				    page_len);
		}
	}
	zfs_range_unlock(rl);

	return (error);
}

/*
 * Truncate a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	zfs_range_unlock(rl);

	return (0);
}

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- length of range (0 => from off to end-of-file,
 *			  i.e. truncate)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	dmu_tx_t *tx;
	zfs_sb_t *zsb = ZTOZSB(zp);
	zilog_t *zilog = zsb->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error = zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		goto out;
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		goto out;
log:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);

	zfs_inode_update(zp);
	error = 0;

out:
	/*
	 * Truncate the page cache - for file truncate operations, use
	 * the purpose-built API for truncations. For punching operations,
	 * the truncation is handled under a range lock in zfs_free_range.
	 */
	if (len == 0)
		truncate_setsize(ZTOI(zp), off);
	return (error);
}
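
/*
 * Illustrative sketch (comment only, not compiled): the two ways the
 * function above is typically invoked, per its length handling. A zero
 * length sets a new end-of-file (truncating or extending as needed); a
 * non-zero length punches a hole. Flag and log values are placeholders.
 *
 *	error = zfs_freesp(zp, new_eof, 0, 0, B_TRUE);	// set EOF to new_eof
 *	error = zfs_freesp(zp, off, len, 0, B_TRUE);	// punch [off, off+len)
 */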

void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	struct super_block *sb;
	zfs_sb_t *zsb;
	uint64_t moid, obj, sa_obj, version;
	uint64_t sense = ZFS_CASE_SENSITIVE;
	uint64_t norm = 0;
	nvpair_t *elem;
	int size;
	int error;
	int i;
	znode_t *rootzp = NULL;
	vattr_t vattr;
	znode_t *zp;
	zfs_acl_ids_t acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		char *name;

		ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
		VERIFY(nvpair_value_uint64(elem, &val) == 0);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT(error == 0);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT(version != 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);

	/*
	 * Create zap object used for SA attribute registration
	 */
	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT(error == 0);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT(error == 0);

	/*
	 * Create root znode. Create minimal znode/inode/zsb/sb
	 * to allow zfs_mknode to work.
	 */
	vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	rootzp->z_moved = 0;
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
	zsb->z_os = os;
	zsb->z_parent = zsb;
	zsb->z_version = version;
	zsb->z_use_fuids = USE_FUIDS(version, os);
	zsb->z_use_sa = USE_SA(version, os);
	zsb->z_norm = norm;

	sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
	sb->s_fs_info = zsb;

	ZTOI(rootzp)->i_sb = sb;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	ASSERT(error == 0);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
	zsb->z_hold_size = size;
	zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP);
	zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (i = 0; i != size; i++) {
		avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);

	atomic_set(&ZTOI(rootzp)->i_count, 0);
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	/*
	 * Create shares directory
	 */
	error = zfs_create_share_dir(zsb, tx);
	ASSERT(error == 0);

	for (i = 0; i != size; i++) {
		avl_destroy(&zsb->z_hold_trees[i]);
		mutex_destroy(&zsb->z_hold_locks[i]);
	}

	vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size);
	kmem_free(sb, sizeof (struct super_block));
	kmem_free(zsb, sizeof (zfs_sb_t));
}
#endif /* _KERNEL */

static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}

static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
		sa_buf_rele(*db, tag);
		return (SET_ERROR(ENOTSUP));
	}

	error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
	if (error != 0) {
		sa_buf_rele(*db, tag);
		return (error);
	}

	return (0);
}

void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
	sa_handle_destroy(hdl);
	sa_buf_rele(db, tag);
}

/*
 * Given an object number, return its parent object number and whether
 * or not the object is an extended attribute directory.
 */
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
    uint64_t *pobjp, int *is_xattrdir)
{
	uint64_t parent;
	uint64_t pflags;
	uint64_t mode;
	uint64_t parent_mode;
	sa_bulk_attr_t bulk[3];
	sa_handle_t *sa_hdl;
	dmu_buf_t *sa_db;
	int count = 0;
	int error;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
	    &parent, sizeof (parent));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
	    &pflags, sizeof (pflags));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &mode, sizeof (mode));

	if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
		return (error);

	/*
	 * When a link is removed its parent pointer is not changed and will
	 * be invalid. There are two cases where a link is removed but the
	 * file stays around: when it goes to the delete queue and when there
	 * are additional links.
	 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (EINVAL);

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	for (;;) {
		uint64_t pobj = 0;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir = 0;

		if (prevdb)
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		bcopy(component, path, complen);
		obj = pobj;

		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}
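
/*
 * Illustrative sketch (comment only, not compiled): resolving an object
 * number back to a path with zfs_obj_to_path(). The path is assembled from
 * the tail of the caller's buffer; MAXPATHLEN is just an example size.
 *
 *	char buf[MAXPATHLEN];
 *	int error;
 *
 *	error = zfs_obj_to_path(osp, obj, buf, sizeof (buf));
 *	if (error == 0)
 *		... buf now holds the object's path within the dataset ...
 */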

int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);

module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
#endif