1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
24 */
25
26 /* Portions Copyright 2007 Jeremy Teo */
27
28 #ifdef _KERNEL
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/time.h>
32 #include <sys/sysmacros.h>
33 #include <sys/mntent.h>
34 #include <sys/u8_textprep.h>
35 #include <sys/dsl_dataset.h>
36 #include <sys/vfs.h>
37 #include <sys/vnode.h>
38 #include <sys/file.h>
39 #include <sys/kmem.h>
40 #include <sys/errno.h>
41 #include <sys/mode.h>
42 #include <sys/atomic.h>
43 #include <sys/zfs_dir.h>
44 #include <sys/zfs_acl.h>
45 #include <sys/zfs_ioctl.h>
46 #include <sys/zfs_rlock.h>
47 #include <sys/zfs_fuid.h>
48 #include <sys/zfs_vnops.h>
49 #include <sys/zfs_ctldir.h>
50 #include <sys/dnode.h>
51 #include <sys/fs/zfs.h>
52 #include <sys/zpl.h>
53 #endif /* _KERNEL */
54
55 #include <sys/dmu.h>
56 #include <sys/dmu_objset.h>
57 #include <sys/dmu_tx.h>
58 #include <sys/refcount.h>
59 #include <sys/stat.h>
60 #include <sys/zap.h>
61 #include <sys/zfs_znode.h>
62 #include <sys/sa.h>
63 #include <sys/zfs_sa.h>
64 #include <sys/zfs_stat.h>
65
66 #include "zfs_prop.h"
67 #include "zfs_comutil.h"
68
69 /*
70 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
71 * turned on when DEBUG is also defined.
72 */
73 #ifdef DEBUG
74 #define ZNODE_STATS
75 #endif /* DEBUG */
76
77 #ifdef ZNODE_STATS
78 #define ZNODE_STAT_ADD(stat) ((stat)++)
79 #else
80 #define ZNODE_STAT_ADD(stat) /* nothing */
81 #endif /* ZNODE_STATS */
82
83 /*
84  * Functions needed for userland (i.e. libzpool) are not put under
85  * #ifdef _KERNEL; the rest of the functions have dependencies
86 * (such as VFS logic) that will not compile easily in userland.
87 */
88 #ifdef _KERNEL
89
90 static kmem_cache_t *znode_cache = NULL;
91 static kmem_cache_t *znode_hold_cache = NULL;
92 unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
93
94 /*
95 * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
96 * z_rangelock. It will modify the offset and length of the lock to reflect
97 * znode-specific information, and convert RL_APPEND to RL_WRITER. This is
98 * called with the rangelock_t's rl_lock held, which avoids races.
99 */
100 static void
101 zfs_rangelock_cb(locked_range_t *new, void *arg)
102 {
103 znode_t *zp = arg;
104
105 /*
106 * If in append mode, convert to writer and lock starting at the
107 * current end of file.
108 */
109 if (new->lr_type == RL_APPEND) {
110 new->lr_offset = zp->z_size;
111 new->lr_type = RL_WRITER;
112 }
113
114 /*
115 * If we need to grow the block size then lock the whole file range.
116 */
117 uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
118 if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
119 zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
120 new->lr_offset = 0;
121 new->lr_length = UINT64_MAX;
122 }
123 }
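
/*
 * Editorial sketch (not part of the upstream file): how a caller relies
 * on the callback above. Using the rangelock API as it appears elsewhere
 * in this file, an appending writer never computes its own offset:
 *
 *	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
 *	    0, nbytes, RL_APPEND);
 *	// zfs_rangelock_cb() has rebased lr_offset to zp->z_size and
 *	// converted lr_type to RL_WRITER before the lock was granted
 *	... write nbytes at lr->lr_offset ...
 *	rangelock_exit(lr);
 */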
124
125 /*ARGSUSED*/
126 static int
127 zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
128 {
129 znode_t *zp = buf;
130
131 inode_init_once(ZTOI(zp));
132 list_link_init(&zp->z_link_node);
133
134 mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
135 rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
136 rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
137 mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
138 rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
139
140 rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
141
142 zp->z_dirlocks = NULL;
143 zp->z_acl_cached = NULL;
144 zp->z_xattr_cached = NULL;
145 zp->z_xattr_parent = 0;
146 zp->z_moved = 0;
147 return (0);
148 }
149
150 /*ARGSUSED*/
151 static void
152 zfs_znode_cache_destructor(void *buf, void *arg)
153 {
154 znode_t *zp = buf;
155
156 ASSERT(!list_link_active(&zp->z_link_node));
157 mutex_destroy(&zp->z_lock);
158 rw_destroy(&zp->z_parent_lock);
159 rw_destroy(&zp->z_name_lock);
160 mutex_destroy(&zp->z_acl_lock);
161 rw_destroy(&zp->z_xattr_lock);
162 rangelock_fini(&zp->z_rangelock);
163
164 ASSERT(zp->z_dirlocks == NULL);
165 ASSERT(zp->z_acl_cached == NULL);
166 ASSERT(zp->z_xattr_cached == NULL);
167 }
168
169 static int
170 zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
171 {
172 znode_hold_t *zh = buf;
173
174 mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
175 zfs_refcount_create(&zh->zh_refcount);
176 zh->zh_obj = ZFS_NO_OBJECT;
177
178 return (0);
179 }
180
181 static void
182 zfs_znode_hold_cache_destructor(void *buf, void *arg)
183 {
184 znode_hold_t *zh = buf;
185
186 mutex_destroy(&zh->zh_lock);
187 zfs_refcount_destroy(&zh->zh_refcount);
188 }
189
190 void
191 zfs_znode_init(void)
192 {
193 /*
194	 * Initialize zcache. The KMC_SLAB hint is used so that the cache is
195	 * backed by kmalloc() when on the Linux slab, which ensures that any
196	 * wait_on_bit() operations on the related inode operate properly.
197 */
198 ASSERT(znode_cache == NULL);
199 znode_cache = kmem_cache_create("zfs_znode_cache",
200 sizeof (znode_t), 0, zfs_znode_cache_constructor,
201 zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
202
203 ASSERT(znode_hold_cache == NULL);
204 znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
205 sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
206 zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
207 }
208
209 void
210 zfs_znode_fini(void)
211 {
212 /*
213 * Cleanup zcache
214 */
215 if (znode_cache)
216 kmem_cache_destroy(znode_cache);
217 znode_cache = NULL;
218
219 if (znode_hold_cache)
220 kmem_cache_destroy(znode_hold_cache);
221 znode_hold_cache = NULL;
222 }
223
224 /*
225 * The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
226 * serialize access to a znode and its SA buffer while the object is being
227 * created or destroyed. This kind of locking would normally reside in the
228 * znode itself but in this case that's impossible because the znode and SA
229 * buffer may not yet exist. Therefore the locking is handled externally
230  * with an array of mutexes and AVL trees which contain per-object locks.
231 *
232 * In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
233 * in to the correct AVL tree and finally the per-object lock is held. In
234 * zfs_znode_hold_exit() the process is reversed. The per-object lock is
235 * released, removed from the AVL tree and destroyed if there are no waiters.
236 *
237 * This scheme has two important properties:
238 *
239 * 1) No memory allocations are performed while holding one of the z_hold_locks.
240 * This ensures evict(), which can be called from direct memory reclaim, will
241  *    never block waiting on a z_hold_lock which just happens to have hashed
242 * to the same index.
243 *
244 * 2) All locks used to serialize access to an object are per-object and never
245 * shared. This minimizes lock contention without creating a large number
246 * of dedicated locks.
247 *
248  * On the downside it does require znode_hold_t structures to be frequently
249  * allocated and freed. However, because these are backed by a kmem cache
250  * and are very short-lived, this cost is minimal.
251 */
252 int
253 zfs_znode_hold_compare(const void *a, const void *b)
254 {
255 const znode_hold_t *zh_a = (const znode_hold_t *)a;
256 const znode_hold_t *zh_b = (const znode_hold_t *)b;
257
258 return (AVL_CMP(zh_a->zh_obj, zh_b->zh_obj));
259 }
260
261 boolean_t
262 zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
263 {
264 znode_hold_t *zh, search;
265 int i = ZFS_OBJ_HASH(zfsvfs, obj);
266 boolean_t held;
267
268 search.zh_obj = obj;
269
270 mutex_enter(&zfsvfs->z_hold_locks[i]);
271 zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
272 held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
273 mutex_exit(&zfsvfs->z_hold_locks[i]);
274
275 return (held);
276 }
277
278 static znode_hold_t *
279 zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
280 {
281 znode_hold_t *zh, *zh_new, search;
282 int i = ZFS_OBJ_HASH(zfsvfs, obj);
283 boolean_t found = B_FALSE;
284
285 zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
286 zh_new->zh_obj = obj;
287 search.zh_obj = obj;
288
289 mutex_enter(&zfsvfs->z_hold_locks[i]);
290 zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
291 if (likely(zh == NULL)) {
292 zh = zh_new;
293 avl_add(&zfsvfs->z_hold_trees[i], zh);
294 } else {
295 ASSERT3U(zh->zh_obj, ==, obj);
296 found = B_TRUE;
297 }
298 zfs_refcount_add(&zh->zh_refcount, NULL);
299 mutex_exit(&zfsvfs->z_hold_locks[i]);
300
301 if (found == B_TRUE)
302 kmem_cache_free(znode_hold_cache, zh_new);
303
304 ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
305 ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
306 mutex_enter(&zh->zh_lock);
307
308 return (zh);
309 }
310
311 static void
312 zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
313 {
314 int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
315 boolean_t remove = B_FALSE;
316
317 ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
318 ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
319 mutex_exit(&zh->zh_lock);
320
321 mutex_enter(&zfsvfs->z_hold_locks[i]);
322 if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
323 avl_remove(&zfsvfs->z_hold_trees[i], zh);
324 remove = B_TRUE;
325 }
326 mutex_exit(&zfsvfs->z_hold_locks[i]);
327
328 if (remove == B_TRUE)
329 kmem_cache_free(znode_hold_cache, zh);
330 }
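
/*
 * Editorial sketch: callers bracket object creation or destruction with
 * the hold, exactly as zfs_mknode() and zfs_znode_delete() below do:
 *
 *	znode_hold_t *zh = zfs_znode_hold_enter(zfsvfs, obj);
 *	... create or destroy the object and its SA buffer ...
 *	zfs_znode_hold_exit(zfsvfs, zh);
 */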
331
332 static void
333 zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
334 dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
335 {
336 ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
337
338 mutex_enter(&zp->z_lock);
339
340 ASSERT(zp->z_sa_hdl == NULL);
341 ASSERT(zp->z_acl_cached == NULL);
342 if (sa_hdl == NULL) {
343 VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
344 SA_HDL_SHARED, &zp->z_sa_hdl));
345 } else {
346 zp->z_sa_hdl = sa_hdl;
347 sa_set_userp(sa_hdl, zp);
348 }
349
350 zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
351
352 mutex_exit(&zp->z_lock);
353 }
354
355 void
356 zfs_znode_dmu_fini(znode_t *zp)
357 {
358 ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) || zp->z_unlinked ||
359 RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
360
361 sa_handle_destroy(zp->z_sa_hdl);
362 zp->z_sa_hdl = NULL;
363 }
364
365 /*
366 * Called by new_inode() to allocate a new inode.
367 */
368 int
369 zfs_inode_alloc(struct super_block *sb, struct inode **ip)
370 {
371 znode_t *zp;
372
373 zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
374 *ip = ZTOI(zp);
375
376 return (0);
377 }
378
379 /*
380 * Called in multiple places when an inode should be destroyed.
381 */
382 void
383 zfs_inode_destroy(struct inode *ip)
384 {
385 znode_t *zp = ITOZ(ip);
386 zfsvfs_t *zfsvfs = ZTOZSB(zp);
387
388 mutex_enter(&zfsvfs->z_znodes_lock);
389 if (list_link_active(&zp->z_link_node)) {
390 list_remove(&zfsvfs->z_all_znodes, zp);
391 zfsvfs->z_nr_znodes--;
392 }
393 mutex_exit(&zfsvfs->z_znodes_lock);
394
395 if (zp->z_acl_cached) {
396 zfs_acl_free(zp->z_acl_cached);
397 zp->z_acl_cached = NULL;
398 }
399
400 if (zp->z_xattr_cached) {
401 nvlist_free(zp->z_xattr_cached);
402 zp->z_xattr_cached = NULL;
403 }
404
405 kmem_cache_free(znode_cache, zp);
406 }
407
408 static void
409 zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
410 {
411 uint64_t rdev = 0;
412
413 switch (ip->i_mode & S_IFMT) {
414 case S_IFREG:
415 ip->i_op = &zpl_inode_operations;
416 ip->i_fop = &zpl_file_operations;
417 ip->i_mapping->a_ops = &zpl_address_space_operations;
418 break;
419
420 case S_IFDIR:
421 ip->i_op = &zpl_dir_inode_operations;
422 ip->i_fop = &zpl_dir_file_operations;
423 ITOZ(ip)->z_zn_prefetch = B_TRUE;
424 break;
425
426 case S_IFLNK:
427 ip->i_op = &zpl_symlink_inode_operations;
428 break;
429
430 /*
431	 * rdev is stored in the SA only for device files.
432 */
433 case S_IFCHR:
434 case S_IFBLK:
435 (void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
436 sizeof (rdev));
437 /*FALLTHROUGH*/
438 case S_IFIFO:
439 case S_IFSOCK:
440 init_special_inode(ip, ip->i_mode, rdev);
441 ip->i_op = &zpl_special_inode_operations;
442 break;
443
444 default:
445 zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
446 (u_longlong_t)ip->i_ino, ip->i_mode);
447
448 /* Assume the inode is a file and attempt to continue */
449 ip->i_mode = S_IFREG | 0644;
450 ip->i_op = &zpl_inode_operations;
451 ip->i_fop = &zpl_file_operations;
452 ip->i_mapping->a_ops = &zpl_address_space_operations;
453 break;
454 }
455 }
456
457 void
458 zfs_set_inode_flags(znode_t *zp, struct inode *ip)
459 {
460 /*
461 * Linux and Solaris have different sets of file attributes, so we
462 * restrict this conversion to the intersection of the two.
463 */
464 #ifdef HAVE_INODE_SET_FLAGS
465 unsigned int flags = 0;
466 if (zp->z_pflags & ZFS_IMMUTABLE)
467 flags |= S_IMMUTABLE;
468 if (zp->z_pflags & ZFS_APPENDONLY)
469 flags |= S_APPEND;
470
471 inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
472 #else
473 if (zp->z_pflags & ZFS_IMMUTABLE)
474 ip->i_flags |= S_IMMUTABLE;
475 else
476 ip->i_flags &= ~S_IMMUTABLE;
477
478 if (zp->z_pflags & ZFS_APPENDONLY)
479 ip->i_flags |= S_APPEND;
480 else
481 ip->i_flags &= ~S_APPEND;
482 #endif
483 }
484
485 /*
486 * Update the embedded inode given the znode. We should work toward
487 * eliminating this function as soon as possible by removing values
488 * which are duplicated between the znode and inode. If the generic
489 * inode has the correct field it should be used, and the ZFS code
490 * updated to access the inode. This can be done incrementally.
491 */
492 void
493 zfs_inode_update(znode_t *zp)
494 {
495 zfsvfs_t *zfsvfs;
496 struct inode *ip;
497 uint32_t blksize;
498 u_longlong_t i_blocks;
499
500 ASSERT(zp != NULL);
501 zfsvfs = ZTOZSB(zp);
502 ip = ZTOI(zp);
503
504 /* Skip .zfs control nodes which do not exist on disk. */
505 if (zfsctl_is_node(ip))
506 return;
507
508 dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
509
510 spin_lock(&ip->i_lock);
511 ip->i_blocks = i_blocks;
512 i_size_write(ip, zp->z_size);
513 spin_unlock(&ip->i_lock);
514 }
515
516
517 /*
518 * Construct a znode+inode and initialize.
519 *
520  * This does not do a call to dmu_set_user(); that is
521  * up to the caller to do, in case you don't want to
522  * return the znode.
523 */
524 static znode_t *
525 zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
526 dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
527 {
528 znode_t *zp;
529 struct inode *ip;
530 uint64_t mode;
531 uint64_t parent;
532 uint64_t tmp_gen;
533 uint64_t links;
534 uint64_t z_uid, z_gid;
535 uint64_t atime[2], mtime[2], ctime[2];
536 uint64_t projid = ZFS_DEFAULT_PROJID;
537 sa_bulk_attr_t bulk[11];
538 int count = 0;
539
540 ASSERT(zfsvfs != NULL);
541
542 ip = new_inode(zfsvfs->z_sb);
543 if (ip == NULL)
544 return (NULL);
545
546 zp = ITOZ(ip);
547 ASSERT(zp->z_dirlocks == NULL);
548 ASSERT3P(zp->z_acl_cached, ==, NULL);
549 ASSERT3P(zp->z_xattr_cached, ==, NULL);
550 zp->z_moved = 0;
551 zp->z_sa_hdl = NULL;
552 zp->z_unlinked = 0;
553 zp->z_atime_dirty = 0;
554 zp->z_mapcnt = 0;
555 zp->z_id = db->db_object;
556 zp->z_blksz = blksz;
557 zp->z_seq = 0x7A4653;
558 zp->z_sync_cnt = 0;
559 zp->z_is_mapped = B_FALSE;
560 zp->z_is_ctldir = B_FALSE;
561 zp->z_is_stale = B_FALSE;
562
563 zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
564
565 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
566 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
567 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
568 &zp->z_size, 8);
569 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
570 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
571 &zp->z_pflags, 8);
572 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
573 &parent, 8);
574 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
575 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
576 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
577 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
578 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
579
580 if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
581 (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
582 (zp->z_pflags & ZFS_PROJID) &&
583 sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
584 if (hdl == NULL)
585 sa_handle_destroy(zp->z_sa_hdl);
586 zp->z_sa_hdl = NULL;
587 goto error;
588 }
589
590 zp->z_projid = projid;
591 zp->z_mode = ip->i_mode = mode;
592 ip->i_generation = (uint32_t)tmp_gen;
593 ip->i_blkbits = SPA_MINBLOCKSHIFT;
594 set_nlink(ip, (uint32_t)links);
595 zfs_uid_write(ip, z_uid);
596 zfs_gid_write(ip, z_gid);
597 zfs_set_inode_flags(zp, ip);
598
599 /* Cache the xattr parent id */
600 if (zp->z_pflags & ZFS_XATTR)
601 zp->z_xattr_parent = parent;
602
603 ZFS_TIME_DECODE(&ip->i_atime, atime);
604 ZFS_TIME_DECODE(&ip->i_mtime, mtime);
605 ZFS_TIME_DECODE(&ip->i_ctime, ctime);
606
607 ip->i_ino = obj;
608 zfs_inode_update(zp);
609 zfs_inode_set_ops(zfsvfs, ip);
610
611 /*
612 * The only way insert_inode_locked() can fail is if the ip->i_ino
613 * number is already hashed for this super block. This can never
614 * happen because the inode numbers map 1:1 with the object numbers.
615 *
616 * The one exception is rolling back a mounted file system, but in
617	 * this case all the active inodes are unhashed during the rollback.
618 */
619 VERIFY3S(insert_inode_locked(ip), ==, 0);
620
621 mutex_enter(&zfsvfs->z_znodes_lock);
622 list_insert_tail(&zfsvfs->z_all_znodes, zp);
623 zfsvfs->z_nr_znodes++;
624 membar_producer();
625 mutex_exit(&zfsvfs->z_znodes_lock);
626
627 unlock_new_inode(ip);
628 return (zp);
629
630 error:
631 iput(ip);
632 return (NULL);
633 }
634
635 /*
636 * Safely mark an inode dirty. Inodes which are part of a read-only
637 * file system or snapshot may not be dirtied.
638 */
639 void
640 zfs_mark_inode_dirty(struct inode *ip)
641 {
642 zfsvfs_t *zfsvfs = ITOZSB(ip);
643
644 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
645 return;
646
647 mark_inode_dirty(ip);
648 }
649
650 static uint64_t empty_xattr;
651 static uint64_t pad[4];
652 static zfs_acl_phys_t acl_phys;
653 /*
654 * Create a new DMU object to hold a zfs znode.
655 *
656 * IN: dzp - parent directory for new znode
657 * vap - file attributes for new znode
658 * tx - dmu transaction id for zap operations
659 * cr - credentials of caller
660 * flag - flags:
661 * IS_ROOT_NODE - new object will be root
662 * IS_XATTR - new object is an attribute
663 * bonuslen - length of bonus buffer
664 * setaclp - File/Dir initial ACL
665 * fuidp - Tracks fuid allocation.
666 *
667 * OUT: zpp - allocated znode
668 *
669 */
670 void
671 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
672 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
673 {
674 uint64_t crtime[2], atime[2], mtime[2], ctime[2];
675 uint64_t mode, size, links, parent, pflags;
676 uint64_t projid = ZFS_DEFAULT_PROJID;
677 uint64_t rdev = 0;
678 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
679 dmu_buf_t *db;
680 inode_timespec_t now;
681 uint64_t gen, obj;
682 int bonuslen;
683 int dnodesize;
684 sa_handle_t *sa_hdl;
685 dmu_object_type_t obj_type;
686 sa_bulk_attr_t *sa_attrs;
687 int cnt = 0;
688 zfs_acl_locator_cb_t locate = { 0 };
689 znode_hold_t *zh;
690
691 if (zfsvfs->z_replay) {
692 obj = vap->va_nodeid;
693 now = vap->va_ctime; /* see zfs_replay_create() */
694 gen = vap->va_nblocks; /* ditto */
695 dnodesize = vap->va_fsid; /* ditto */
696 } else {
697 obj = 0;
698 gethrestime(&now);
699 gen = dmu_tx_get_txg(tx);
700 dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
701 }
702
703 if (dnodesize == 0)
704 dnodesize = DNODE_MIN_SIZE;
705
706 obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
707
708 bonuslen = (obj_type == DMU_OT_SA) ?
709 DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
710
711 /*
712 * Create a new DMU object.
713 */
714 /*
715 * There's currently no mechanism for pre-reading the blocks that will
716 * be needed to allocate a new object, so we accept the small chance
717 * that there will be an i/o error and we will fail one of the
718 * assertions below.
719 */
720 if (S_ISDIR(vap->va_mode)) {
721 if (zfsvfs->z_replay) {
722 VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
723 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
724 obj_type, bonuslen, dnodesize, tx));
725 } else {
726 obj = zap_create_norm_dnsize(zfsvfs->z_os,
727 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
728 obj_type, bonuslen, dnodesize, tx);
729 }
730 } else {
731 if (zfsvfs->z_replay) {
732 VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
733 DMU_OT_PLAIN_FILE_CONTENTS, 0,
734 obj_type, bonuslen, dnodesize, tx));
735 } else {
736 obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
737 DMU_OT_PLAIN_FILE_CONTENTS, 0,
738 obj_type, bonuslen, dnodesize, tx);
739 }
740 }
741
742 zh = zfs_znode_hold_enter(zfsvfs, obj);
743 VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
744
745 /*
746 * If this is the root, fix up the half-initialized parent pointer
747 * to reference the just-allocated physical data area.
748 */
749 if (flag & IS_ROOT_NODE) {
750 dzp->z_id = obj;
751 }
752
753 /*
754 * If parent is an xattr, so am I.
755 */
756 if (dzp->z_pflags & ZFS_XATTR) {
757 flag |= IS_XATTR;
758 }
759
760 if (zfsvfs->z_use_fuids)
761 pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
762 else
763 pflags = 0;
764
765 if (S_ISDIR(vap->va_mode)) {
766 size = 2; /* contents ("." and "..") */
767 links = 2;
768 } else {
769 size = 0;
770 links = (flag & IS_TMPFILE) ? 0 : 1;
771 }
772
773 if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
774 rdev = vap->va_rdev;
775
776 parent = dzp->z_id;
777 mode = acl_ids->z_mode;
778 if (flag & IS_XATTR)
779 pflags |= ZFS_XATTR;
780
781 if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
782 /*
783 * With ZFS_PROJID flag, we can easily know whether there is
784 * project ID stored on disk or not. See zfs_space_delta_cb().
785 */
786 if (obj_type != DMU_OT_ZNODE &&
787 dmu_objset_projectquota_enabled(zfsvfs->z_os))
788 pflags |= ZFS_PROJID;
789
790 /*
791 * Inherit project ID from parent if required.
792 */
793 projid = zfs_inherit_projid(dzp);
794 if (dzp->z_pflags & ZFS_PROJINHERIT)
795 pflags |= ZFS_PROJINHERIT;
796 }
797
798 /*
799	 * Whether ZFS_NO_EXECS_DENIED applies is determined when
	 * zfs_mode_compute() is called.
800 */
801 pflags |= acl_ids->z_aclp->z_hints &
802 (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
803 ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
804
805 ZFS_TIME_ENCODE(&now, crtime);
806 ZFS_TIME_ENCODE(&now, ctime);
807
808 if (vap->va_mask & ATTR_ATIME) {
809 ZFS_TIME_ENCODE(&vap->va_atime, atime);
810 } else {
811 ZFS_TIME_ENCODE(&now, atime);
812 }
813
814 if (vap->va_mask & ATTR_MTIME) {
815 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
816 } else {
817 ZFS_TIME_ENCODE(&now, mtime);
818 }
819
820 /* Now add in all of the "SA" attributes */
821 VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
822 &sa_hdl));
823
824 /*
825	 * Set up the array of attributes to be replaced/set on the new file.
826	 *
827	 * The order for DMU_OT_ZNODE is critical since it needs to be constructed
828	 * in the old znode_phys_t format. Don't change this ordering.
829 */
830 sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
831
832 if (obj_type == DMU_OT_ZNODE) {
833 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
834 NULL, &atime, 16);
835 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
836 NULL, &mtime, 16);
837 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
838 NULL, &ctime, 16);
839 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
840 NULL, &crtime, 16);
841 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
842 NULL, &gen, 8);
843 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
844 NULL, &mode, 8);
845 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
846 NULL, &size, 8);
847 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
848 NULL, &parent, 8);
849 } else {
850 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
851 NULL, &mode, 8);
852 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
853 NULL, &size, 8);
854 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
855 NULL, &gen, 8);
856 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
857 NULL, &acl_ids->z_fuid, 8);
858 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
859 NULL, &acl_ids->z_fgid, 8);
860 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
861 NULL, &parent, 8);
862 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
863 NULL, &pflags, 8);
864 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
865 NULL, &atime, 16);
866 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
867 NULL, &mtime, 16);
868 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
869 NULL, &ctime, 16);
870 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
871 NULL, &crtime, 16);
872 }
873
874 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
875
876 if (obj_type == DMU_OT_ZNODE) {
877 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
878 &empty_xattr, 8);
879 } else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
880 pflags & ZFS_PROJID) {
881 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
882 NULL, &projid, 8);
883 }
884 if (obj_type == DMU_OT_ZNODE ||
885 (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
886 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
887 NULL, &rdev, 8);
888 }
889 if (obj_type == DMU_OT_ZNODE) {
890 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
891 NULL, &pflags, 8);
892 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
893 &acl_ids->z_fuid, 8);
894 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
895 &acl_ids->z_fgid, 8);
896 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
897 sizeof (uint64_t) * 4);
898 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
899 &acl_phys, sizeof (zfs_acl_phys_t));
900 } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
901 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
902 &acl_ids->z_aclp->z_acl_count, 8);
903 locate.cb_aclp = acl_ids->z_aclp;
904 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
905 zfs_acl_data_locator, &locate,
906 acl_ids->z_aclp->z_acl_bytes);
907 mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
908 acl_ids->z_fuid, acl_ids->z_fgid);
909 }
910
911 VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
912
913 if (!(flag & IS_ROOT_NODE)) {
914 /*
915 * The call to zfs_znode_alloc() may fail if memory is low
916 * via the call path: alloc_inode() -> inode_init_always() ->
917 * security_inode_alloc() -> inode_alloc_security(). Since
918		 * the existing code is written such that zfs_mknode() cannot
919		 * fail, retry until sufficient memory has been reclaimed.
920 */
921 do {
922 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, obj,
923 sa_hdl);
924 } while (*zpp == NULL);
925
926 VERIFY(*zpp != NULL);
927 VERIFY(dzp != NULL);
928 } else {
929 /*
930 * If we are creating the root node, the "parent" we
931 * passed in is the znode for the root.
932 */
933 *zpp = dzp;
934
935 (*zpp)->z_sa_hdl = sa_hdl;
936 }
937
938 (*zpp)->z_pflags = pflags;
939 (*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
940 (*zpp)->z_dnodesize = dnodesize;
941 (*zpp)->z_projid = projid;
942
943 if (obj_type == DMU_OT_ZNODE ||
944 acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
945 VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
946 }
947 kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
948 zfs_znode_hold_exit(zfsvfs, zh);
949 }
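
/*
 * Editorial sketch: a representative creation path pairs zfs_mknode()
 * with zfs_acl_ids_create()/zfs_acl_ids_free() under an assigned tx,
 * as zfs_create_fs() below does for the root znode:
 *
 *	VERIFY0(zfs_acl_ids_create(dzp, 0, &vattr, cr, NULL, &acl_ids));
 *	zfs_mknode(dzp, &vattr, tx, cr, 0, &zp, &acl_ids);
 *	zfs_acl_ids_free(&acl_ids);
 */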
950
951 /*
952 * Update in-core attributes. It is assumed the caller will be doing an
953 * sa_bulk_update to push the changes out.
954 */
955 void
956 zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
957 {
958 xoptattr_t *xoap;
959 boolean_t update_inode = B_FALSE;
960
961 xoap = xva_getxoptattr(xvap);
962 ASSERT(xoap);
963
964 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
965 uint64_t times[2];
966 ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
967 (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
968 &times, sizeof (times), tx);
969 XVA_SET_RTN(xvap, XAT_CREATETIME);
970 }
971 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
972 ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
973 zp->z_pflags, tx);
974 XVA_SET_RTN(xvap, XAT_READONLY);
975 }
976 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
977 ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
978 zp->z_pflags, tx);
979 XVA_SET_RTN(xvap, XAT_HIDDEN);
980 }
981 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
982 ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
983 zp->z_pflags, tx);
984 XVA_SET_RTN(xvap, XAT_SYSTEM);
985 }
986 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
987 ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
988 zp->z_pflags, tx);
989 XVA_SET_RTN(xvap, XAT_ARCHIVE);
990 }
991 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
992 ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
993 zp->z_pflags, tx);
994 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
995
996 update_inode = B_TRUE;
997 }
998 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
999 ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
1000 zp->z_pflags, tx);
1001 XVA_SET_RTN(xvap, XAT_NOUNLINK);
1002 }
1003 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
1004 ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
1005 zp->z_pflags, tx);
1006 XVA_SET_RTN(xvap, XAT_APPENDONLY);
1007
1008 update_inode = B_TRUE;
1009 }
1010 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
1011 ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
1012 zp->z_pflags, tx);
1013 XVA_SET_RTN(xvap, XAT_NODUMP);
1014 }
1015 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
1016 ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
1017 zp->z_pflags, tx);
1018 XVA_SET_RTN(xvap, XAT_OPAQUE);
1019 }
1020 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
1021 ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
1022 xoap->xoa_av_quarantined, zp->z_pflags, tx);
1023 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
1024 }
1025 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
1026 ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
1027 zp->z_pflags, tx);
1028 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
1029 }
1030 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
1031 zfs_sa_set_scanstamp(zp, xvap, tx);
1032 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
1033 }
1034 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
1035 ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
1036 zp->z_pflags, tx);
1037 XVA_SET_RTN(xvap, XAT_REPARSE);
1038 }
1039 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
1040 ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
1041 zp->z_pflags, tx);
1042 XVA_SET_RTN(xvap, XAT_OFFLINE);
1043 }
1044 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
1045 ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
1046 zp->z_pflags, tx);
1047 XVA_SET_RTN(xvap, XAT_SPARSE);
1048 }
1049 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
1050 ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
1051 zp->z_pflags, tx);
1052 XVA_SET_RTN(xvap, XAT_PROJINHERIT);
1053 }
1054
1055 if (update_inode)
1056 zfs_set_inode_flags(zp, ZTOI(zp));
1057 }
1058
1059 int
1060 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
1061 {
1062 dmu_object_info_t doi;
1063 dmu_buf_t *db;
1064 znode_t *zp;
1065 znode_hold_t *zh;
1066 int err;
1067 sa_handle_t *hdl;
1068
1069 *zpp = NULL;
1070
1071 again:
1072 zh = zfs_znode_hold_enter(zfsvfs, obj_num);
1073
1074 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1075 if (err) {
1076 zfs_znode_hold_exit(zfsvfs, zh);
1077 return (err);
1078 }
1079
1080 dmu_object_info_from_db(db, &doi);
1081 if (doi.doi_bonus_type != DMU_OT_SA &&
1082 (doi.doi_bonus_type != DMU_OT_ZNODE ||
1083 (doi.doi_bonus_type == DMU_OT_ZNODE &&
1084 doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1085 sa_buf_rele(db, NULL);
1086 zfs_znode_hold_exit(zfsvfs, zh);
1087 return (SET_ERROR(EINVAL));
1088 }
1089
1090 hdl = dmu_buf_get_user(db);
1091 if (hdl != NULL) {
1092 zp = sa_get_userdata(hdl);
1093
1094
1095 /*
1096		 * Since "SA" does immediate eviction, we
1097		 * should never find an SA handle that doesn't
1098 * know about the znode.
1099 */
1100
1101 ASSERT3P(zp, !=, NULL);
1102
1103 mutex_enter(&zp->z_lock);
1104 ASSERT3U(zp->z_id, ==, obj_num);
1105 /*
1106 * If igrab() returns NULL the VFS has independently
1107 * determined the inode should be evicted and has
1108 * called iput_final() to start the eviction process.
1109 * The SA handle is still valid but because the VFS
1110 * requires that the eviction succeed we must drop
1111 * our locks and references to allow the eviction to
1112 * complete. The zfs_zget() may then be retried.
1113 *
1114 * This unlikely case could be optimized by registering
1115 * a sops->drop_inode() callback. The callback would
1116 * need to detect the active SA hold thereby informing
1117 * the VFS that this inode should not be evicted.
1118 */
1119 if (igrab(ZTOI(zp)) == NULL) {
1120 mutex_exit(&zp->z_lock);
1121 sa_buf_rele(db, NULL);
1122 zfs_znode_hold_exit(zfsvfs, zh);
1123 /* inode might need this to finish evict */
1124 cond_resched();
1125 goto again;
1126 }
1127 *zpp = zp;
1128 err = 0;
1129 mutex_exit(&zp->z_lock);
1130 sa_buf_rele(db, NULL);
1131 zfs_znode_hold_exit(zfsvfs, zh);
1132 return (err);
1133 }
1134
1135	/*
1136	 * Not found; create a new znode/vnode, but only if the file exists.
1137	 *
1138	 * There is a small window where zfs_vget() could
1139	 * find this object while a file create is still in
1140	 * progress. This is checked for in zfs_znode_alloc().
1141	 *
1142	 * If zfs_znode_alloc() fails, it will drop the hold on the
1143	 * bonus buffer.
1144	 */
1145 zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
1146 doi.doi_bonus_type, obj_num, NULL);
1147 if (zp == NULL) {
1148 err = SET_ERROR(ENOENT);
1149 } else {
1150 *zpp = zp;
1151 }
1152 zfs_znode_hold_exit(zfsvfs, zh);
1153 return (err);
1154 }
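
/*
 * Editorial sketch: a typical caller resolves an object number to a
 * referenced znode and later drops that reference through the inode:
 *
 *	znode_t *zp;
 *	int err = zfs_zget(zfsvfs, obj_num, &zp);
 *	if (err == 0) {
 *		... use zp / ZTOI(zp) ...
 *		iput(ZTOI(zp));
 *	}
 */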
1155
1156 int
1157 zfs_rezget(znode_t *zp)
1158 {
1159 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1160 dmu_object_info_t doi;
1161 dmu_buf_t *db;
1162 uint64_t obj_num = zp->z_id;
1163 uint64_t mode;
1164 uint64_t links;
1165 sa_bulk_attr_t bulk[10];
1166 int err;
1167 int count = 0;
1168 uint64_t gen;
1169 uint64_t z_uid, z_gid;
1170 uint64_t atime[2], mtime[2], ctime[2];
1171 uint64_t projid = ZFS_DEFAULT_PROJID;
1172 znode_hold_t *zh;
1173
1174 /*
1175	 * Skip ctldir znodes, otherwise they will always get invalidated. This
1176	 * would cause odd behavior for the mounted snapdirs. In particular, on
1177	 * Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
1178	 * anyone from automounting it again as long as someone is still using
1179	 * the detached mount.
1180 */
1181 if (zp->z_is_ctldir)
1182 return (0);
1183
1184 zh = zfs_znode_hold_enter(zfsvfs, obj_num);
1185
1186 mutex_enter(&zp->z_acl_lock);
1187 if (zp->z_acl_cached) {
1188 zfs_acl_free(zp->z_acl_cached);
1189 zp->z_acl_cached = NULL;
1190 }
1191 mutex_exit(&zp->z_acl_lock);
1192
1193 rw_enter(&zp->z_xattr_lock, RW_WRITER);
1194 if (zp->z_xattr_cached) {
1195 nvlist_free(zp->z_xattr_cached);
1196 zp->z_xattr_cached = NULL;
1197 }
1198 rw_exit(&zp->z_xattr_lock);
1199
1200 ASSERT(zp->z_sa_hdl == NULL);
1201 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1202 if (err) {
1203 zfs_znode_hold_exit(zfsvfs, zh);
1204 return (err);
1205 }
1206
1207 dmu_object_info_from_db(db, &doi);
1208 if (doi.doi_bonus_type != DMU_OT_SA &&
1209 (doi.doi_bonus_type != DMU_OT_ZNODE ||
1210 (doi.doi_bonus_type == DMU_OT_ZNODE &&
1211 doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1212 sa_buf_rele(db, NULL);
1213 zfs_znode_hold_exit(zfsvfs, zh);
1214 return (SET_ERROR(EINVAL));
1215 }
1216
1217 zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
1218
1219 /* reload cached values */
1220 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1221 &gen, sizeof (gen));
1222 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
1223 &zp->z_size, sizeof (zp->z_size));
1224 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
1225 &links, sizeof (links));
1226 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1227 &zp->z_pflags, sizeof (zp->z_pflags));
1228 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1229 &z_uid, sizeof (z_uid));
1230 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1231 &z_gid, sizeof (z_gid));
1232 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1233 &mode, sizeof (mode));
1234 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1235 &atime, 16);
1236 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1237 &mtime, 16);
1238 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1239 &ctime, 16);
1240
1241 if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
1242 zfs_znode_dmu_fini(zp);
1243 zfs_znode_hold_exit(zfsvfs, zh);
1244 return (SET_ERROR(EIO));
1245 }
1246
1247 if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
1248 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
1249 &projid, 8);
1250 if (err != 0 && err != ENOENT) {
1251 zfs_znode_dmu_fini(zp);
1252 zfs_znode_hold_exit(zfsvfs, zh);
1253 return (SET_ERROR(err));
1254 }
1255 }
1256
1257 zp->z_projid = projid;
1258 zp->z_mode = ZTOI(zp)->i_mode = mode;
1259 zfs_uid_write(ZTOI(zp), z_uid);
1260 zfs_gid_write(ZTOI(zp), z_gid);
1261
1262 ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
1263 ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
1264 ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
1265
1266 if (gen != ZTOI(zp)->i_generation) {
1267 zfs_znode_dmu_fini(zp);
1268 zfs_znode_hold_exit(zfsvfs, zh);
1269 return (SET_ERROR(EIO));
1270 }
1271
1272 set_nlink(ZTOI(zp), (uint32_t)links);
1273 zfs_set_inode_flags(zp, ZTOI(zp));
1274
1275 zp->z_blksz = doi.doi_data_block_size;
1276 zp->z_atime_dirty = 0;
1277 zfs_inode_update(zp);
1278
1279 /*
1280 * If the file has zero links, then it has been unlinked on the send
1281 * side and it must be in the received unlinked set.
1282 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
1283	 * stale data and to prevent automatic removal of the file in
1284 * zfs_zinactive(). The file will be removed either when it is removed
1285 * on the send side and the next incremental stream is received or
1286 * when the unlinked set gets processed.
1287 */
1288 zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
1289 if (zp->z_unlinked)
1290 zfs_znode_dmu_fini(zp);
1291
1292 zfs_znode_hold_exit(zfsvfs, zh);
1293
1294 return (0);
1295 }
1296
1297 void
1298 zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
1299 {
1300 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1301 objset_t *os = zfsvfs->z_os;
1302 uint64_t obj = zp->z_id;
1303 uint64_t acl_obj = zfs_external_acl(zp);
1304 znode_hold_t *zh;
1305
1306 zh = zfs_znode_hold_enter(zfsvfs, obj);
1307 if (acl_obj) {
1308 VERIFY(!zp->z_is_sa);
1309 VERIFY(0 == dmu_object_free(os, acl_obj, tx));
1310 }
1311 VERIFY(0 == dmu_object_free(os, obj, tx));
1312 zfs_znode_dmu_fini(zp);
1313 zfs_znode_hold_exit(zfsvfs, zh);
1314 }
1315
1316 void
1317 zfs_zinactive(znode_t *zp)
1318 {
1319 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1320 uint64_t z_id = zp->z_id;
1321 znode_hold_t *zh;
1322
1323 ASSERT(zp->z_sa_hdl);
1324
1325 /*
1326	 * Don't allow a zfs_zget() while we're trying to release this znode.
1327 */
1328 zh = zfs_znode_hold_enter(zfsvfs, z_id);
1329
1330 mutex_enter(&zp->z_lock);
1331
1332 /*
1333 * If this was the last reference to a file with no links, remove
1334 * the file from the file system unless the file system is mounted
1335 * read-only. That can happen, for example, if the file system was
1336 * originally read-write, the file was opened, then unlinked and
1337 * the file system was made read-only before the file was finally
1338 * closed. The file will remain in the unlinked set.
1339 */
1340 if (zp->z_unlinked) {
1341 ASSERT(!zfsvfs->z_issnap);
1342 if (!zfs_is_readonly(zfsvfs)) {
1343 mutex_exit(&zp->z_lock);
1344 zfs_znode_hold_exit(zfsvfs, zh);
1345 zfs_rmnode(zp);
1346 return;
1347 }
1348 }
1349
1350 mutex_exit(&zp->z_lock);
1351 zfs_znode_dmu_fini(zp);
1352
1353 zfs_znode_hold_exit(zfsvfs, zh);
1354 }
1355
1356 static inline int
1357 zfs_compare_timespec(struct timespec *t1, struct timespec *t2)
1358 {
1359 if (t1->tv_sec < t2->tv_sec)
1360 return (-1);
1361
1362 if (t1->tv_sec > t2->tv_sec)
1363 return (1);
1364
1365 return (t1->tv_nsec - t2->tv_nsec);
1366 }
1367
1368 /*
1369 * Prepare to update znode time stamps.
1370 *
1371 * IN: zp - znode requiring timestamp update
1372 * flag - ATTR_MTIME, ATTR_CTIME flags
1373 *
1374 * OUT: zp - z_seq
1375 * mtime - new mtime
1376 * ctime - new ctime
1377 *
1378 * Note: We don't update atime here, because we rely on Linux VFS to do
1379 * atime updating.
1380 */
1381 void
1382 zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
1383 uint64_t ctime[2])
1384 {
1385 inode_timespec_t now;
1386
1387 gethrestime(&now);
1388
1389 zp->z_seq++;
1390
1391 if (flag & ATTR_MTIME) {
1392 ZFS_TIME_ENCODE(&now, mtime);
1393 ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
1394 if (ZTOZSB(zp)->z_use_fuids) {
1395 zp->z_pflags |= (ZFS_ARCHIVE |
1396 ZFS_AV_MODIFIED);
1397 }
1398 }
1399
1400 if (flag & ATTR_CTIME) {
1401 ZFS_TIME_ENCODE(&now, ctime);
1402 ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
1403 if (ZTOZSB(zp)->z_use_fuids)
1404 zp->z_pflags |= ZFS_ARCHIVE;
1405 }
1406 }
1407
1408 /*
1409 * Grow the block size for a file.
1410 *
1411	 * IN:	zp	- znode of file to grow block size for.
1412 * size - requested block size
1413 * tx - open transaction.
1414 *
1415 * NOTE: this function assumes that the znode is write locked.
1416 */
1417 void
1418 zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
1419 {
1420 int error;
1421 u_longlong_t dummy;
1422
1423 if (size <= zp->z_blksz)
1424 return;
1425 /*
1426 * If the file size is already greater than the current blocksize,
1427 * we will not grow. If there is more than one block in a file,
1428 * the blocksize cannot change.
1429 */
1430 if (zp->z_blksz && zp->z_size > zp->z_blksz)
1431 return;
1432
1433 error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
1434 size, 0, tx);
1435
1436 if (error == ENOTSUP)
1437 return;
1438 ASSERT0(error);
1439
1440 /* What blocksize did we actually get? */
1441 dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
1442 }
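
/*
 * Editorial example: for a file with z_blksz = 512 holding a single
 * block, zfs_grow_blocksize(zp, 4096, tx) moves it to one 4K block.
 * Once the file spans multiple blocks (z_size > z_blksz), the early
 * returns above leave the blocksize unchanged.
 */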
1443
1444 /*
1445 * Increase the file length
1446 *
1447	 * IN:	zp	- znode of file to extend.
1448 * end - new end-of-file
1449 *
1450 * RETURN: 0 on success, error code on failure
1451 */
1452 static int
1453 zfs_extend(znode_t *zp, uint64_t end)
1454 {
1455 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1456 dmu_tx_t *tx;
1457 locked_range_t *lr;
1458 uint64_t newblksz;
1459 int error;
1460
1461 /*
1462 * We will change zp_size, lock the whole file.
1463 */
1464 lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
1465
1466 /*
1467 * Nothing to do if file already at desired length.
1468 */
1469 if (end <= zp->z_size) {
1470 rangelock_exit(lr);
1471 return (0);
1472 }
1473 tx = dmu_tx_create(zfsvfs->z_os);
1474 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1475 zfs_sa_upgrade_txholds(tx, zp);
1476 if (end > zp->z_blksz &&
1477 (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
1478 /*
1479 * We are growing the file past the current block size.
1480 */
1481 if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
1482 /*
1483 * File's blocksize is already larger than the
1484 * "recordsize" property. Only let it grow to
1485 * the next power of 2.
1486 */
1487 ASSERT(!ISP2(zp->z_blksz));
1488 newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
1489 } else {
1490 newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
1491 }
1492 dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
1493 } else {
1494 newblksz = 0;
1495 }
1496
1497 error = dmu_tx_assign(tx, TXG_WAIT);
1498 if (error) {
1499 dmu_tx_abort(tx);
1500 rangelock_exit(lr);
1501 return (error);
1502 }
1503
1504 if (newblksz)
1505 zfs_grow_blocksize(zp, newblksz, tx);
1506
1507 zp->z_size = end;
1508
1509 VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
1510 &zp->z_size, sizeof (zp->z_size), tx));
1511
1512 rangelock_exit(lr);
1513
1514 dmu_tx_commit(tx);
1515
1516 return (0);
1517 }
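
/*
 * Editorial example of the newblksz computation above, assuming
 * highbit64() returns the 1-based index of the highest set bit: a file
 * left with a non-power-of-2 blocksize such as 160K (e.g. after
 * "recordsize" was lowered) that is extended past it gets
 * newblksz = MIN(end, 1 << highbit64(160K)) = MIN(end, 256K); it may
 * only round up to the next power of 2.
 */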
1518
1519 /*
1520 * zfs_zero_partial_page - Modeled after update_pages() but
1521 * with different arguments and semantics for use by zfs_freesp().
1522 *
1523 * Zeroes a piece of a single page cache entry for zp at offset
1524 * start and length len.
1525 *
1526 * Caller must acquire a range lock on the file for the region
1527 * being zeroed in order that the ARC and page cache stay in sync.
1528 */
1529 static void
1530 zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
1531 {
1532 struct address_space *mp = ZTOI(zp)->i_mapping;
1533 struct page *pp;
1534 int64_t off;
1535 void *pb;
1536
1537 ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
1538
1539 off = start & (PAGE_SIZE - 1);
1540 start &= PAGE_MASK;
1541
1542 pp = find_lock_page(mp, start >> PAGE_SHIFT);
1543 if (pp) {
1544 if (mapping_writably_mapped(mp))
1545 flush_dcache_page(pp);
1546
1547 pb = kmap(pp);
1548 bzero(pb + off, len);
1549 kunmap(pp);
1550
1551 if (mapping_writably_mapped(mp))
1552 flush_dcache_page(pp);
1553
1554 mark_page_accessed(pp);
1555 SetPageUptodate(pp);
1556 ClearPageError(pp);
1557 unlock_page(pp);
1558 put_page(pp);
1559 }
1560 }
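
/*
 * Editorial example: with PAGE_SIZE = 4096, a call with start = 5000 and
 * len = 100 computes off = 5000 & 4095 = 904, masks start down to 4096,
 * locks page index 1, and zeroes bytes [904, 1003] of that page.
 */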
1561
1562 /*
1563 * Free space in a file.
1564 *
1565 * IN: zp - znode of file to free data in.
1566 * off - start of section to free.
1567 * len - length of section to free.
1568 *
1569 * RETURN: 0 on success, error code on failure
1570 */
1571 static int
1572 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
1573 {
1574 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1575 locked_range_t *lr;
1576 int error;
1577
1578 /*
1579 * Lock the range being freed.
1580 */
1581 lr = rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
1582
1583 /*
1584 * Nothing to do if file already at desired length.
1585 */
1586 if (off >= zp->z_size) {
1587 rangelock_exit(lr);
1588 return (0);
1589 }
1590
1591 if (off + len > zp->z_size)
1592 len = zp->z_size - off;
1593
1594 error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
1595
1596 /*
1597 * Zero partial page cache entries. This must be done under a
1598 * range lock in order to keep the ARC and page cache in sync.
1599 */
1600 if (zp->z_is_mapped) {
1601 loff_t first_page, last_page, page_len;
1602 loff_t first_page_offset, last_page_offset;
1603
1604 /* first possible full page in hole */
1605 first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
1606 /* last page of hole */
1607 last_page = (off + len) >> PAGE_SHIFT;
1608
1609 /* offset of first_page */
1610 first_page_offset = first_page << PAGE_SHIFT;
1611 /* offset of last_page */
1612 last_page_offset = last_page << PAGE_SHIFT;
1613
1614 /* truncate whole pages */
1615 if (last_page_offset > first_page_offset) {
1616 truncate_inode_pages_range(ZTOI(zp)->i_mapping,
1617 first_page_offset, last_page_offset - 1);
1618 }
1619
1620 /* truncate sub-page ranges */
1621 if (first_page > last_page) {
1622 /* entire punched area within a single page */
1623 zfs_zero_partial_page(zp, off, len);
1624 } else {
1625 /* beginning of punched area at the end of a page */
1626 page_len = first_page_offset - off;
1627 if (page_len > 0)
1628 zfs_zero_partial_page(zp, off, page_len);
1629
1630 /* end of punched area at the beginning of a page */
1631 page_len = off + len - last_page_offset;
1632 if (page_len > 0)
1633 zfs_zero_partial_page(zp, last_page_offset,
1634 page_len);
1635 }
1636 }
1637 rangelock_exit(lr);
1638
1639 return (error);
1640 }
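
/*
 * Editorial example of the page-cache cleanup above, with PAGE_SIZE =
 * 4096: punching off = 1000, len = 10000 gives first_page = 1 and
 * last_page = 2, so pages [4096, 8191] are truncated whole, the head
 * [1000, 4095] is zeroed via zfs_zero_partial_page(zp, 1000, 3096), and
 * the tail [8192, 10999] via zfs_zero_partial_page(zp, 8192, 2808).
 */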
1641
1642 /*
1643 * Truncate a file
1644 *
1645	 * IN:	zp	- znode of file to truncate.
1646 * end - new end-of-file.
1647 *
1648 * RETURN: 0 on success, error code on failure
1649 */
1650 static int
1651 zfs_trunc(znode_t *zp, uint64_t end)
1652 {
1653 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1654 dmu_tx_t *tx;
1655 locked_range_t *lr;
1656 int error;
1657 sa_bulk_attr_t bulk[2];
1658 int count = 0;
1659
1660 /*
1661 * We will change zp_size, lock the whole file.
1662 */
1663 lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
1664
1665 /*
1666 * Nothing to do if file already at desired length.
1667 */
1668 if (end >= zp->z_size) {
1669 rangelock_exit(lr);
1670 return (0);
1671 }
1672
1673 error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
1674 DMU_OBJECT_END);
1675 if (error) {
1676 rangelock_exit(lr);
1677 return (error);
1678 }
1679 tx = dmu_tx_create(zfsvfs->z_os);
1680 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1681 zfs_sa_upgrade_txholds(tx, zp);
1682 dmu_tx_mark_netfree(tx);
1683 error = dmu_tx_assign(tx, TXG_WAIT);
1684 if (error) {
1685 dmu_tx_abort(tx);
1686 rangelock_exit(lr);
1687 return (error);
1688 }
1689
1690 zp->z_size = end;
1691 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
1692 NULL, &zp->z_size, sizeof (zp->z_size));
1693
1694 if (end == 0) {
1695 zp->z_pflags &= ~ZFS_SPARSE;
1696 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1697 NULL, &zp->z_pflags, 8);
1698 }
1699 VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
1700
1701 dmu_tx_commit(tx);
1702 rangelock_exit(lr);
1703
1704 return (0);
1705 }
1706
1707 /*
1708 * Free space in a file
1709 *
1710 * IN: zp - znode of file to free data in.
1711 * off - start of range
1712	 *	len	- length of range (0 => truncate to off)
1713 * flag - current file open mode flags.
1714 * log - TRUE if this action should be logged
1715 *
1716 * RETURN: 0 on success, error code on failure
1717 */
1718 int
1719 zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
1720 {
1721 dmu_tx_t *tx;
1722 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1723 zilog_t *zilog = zfsvfs->z_log;
1724 uint64_t mode;
1725 uint64_t mtime[2], ctime[2];
1726 sa_bulk_attr_t bulk[3];
1727 int count = 0;
1728 int error;
1729
1730 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
1731 sizeof (mode))) != 0)
1732 return (error);
1733
1734 if (off > zp->z_size) {
1735 error = zfs_extend(zp, off+len);
1736 if (error == 0 && log)
1737 goto log;
1738 goto out;
1739 }
1740
1741 if (len == 0) {
1742 error = zfs_trunc(zp, off);
1743 } else {
1744 if ((error = zfs_free_range(zp, off, len)) == 0 &&
1745 off + len > zp->z_size)
1746 error = zfs_extend(zp, off+len);
1747 }
1748 if (error || !log)
1749 goto out;
1750 log:
1751 tx = dmu_tx_create(zfsvfs->z_os);
1752 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1753 zfs_sa_upgrade_txholds(tx, zp);
1754 error = dmu_tx_assign(tx, TXG_WAIT);
1755 if (error) {
1756 dmu_tx_abort(tx);
1757 goto out;
1758 }
1759
1760 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
1761 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
1762 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1763 NULL, &zp->z_pflags, 8);
1764 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
1765 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1766 ASSERT(error == 0);
1767
1768 zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
1769
1770 dmu_tx_commit(tx);
1771
1772 zfs_inode_update(zp);
1773 error = 0;
1774
1775 out:
1776 /*
1777 * Truncate the page cache - for file truncate operations, use
1778 * the purpose-built API for truncations. For punching operations,
1779 * the truncation is handled under a range lock in zfs_free_range.
1780 */
1781 if (len == 0)
1782 truncate_setsize(ZTOI(zp), off);
1783 return (error);
1784 }
1785
1786 void
1787 zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
1788 {
1789 struct super_block *sb;
1790 zfsvfs_t *zfsvfs;
1791 uint64_t moid, obj, sa_obj, version;
1792 uint64_t sense = ZFS_CASE_SENSITIVE;
1793 uint64_t norm = 0;
1794 nvpair_t *elem;
1795 int size;
1796 int error;
1797 int i;
1798 znode_t *rootzp = NULL;
1799 vattr_t vattr;
1800 znode_t *zp;
1801 zfs_acl_ids_t acl_ids;
1802
1803 /*
1804 * First attempt to create master node.
1805 */
1806 /*
1807 * In an empty objset, there are no blocks to read and thus
1808 * there can be no i/o errors (which we assert below).
1809 */
1810 moid = MASTER_NODE_OBJ;
1811 error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
1812 DMU_OT_NONE, 0, tx);
1813 ASSERT(error == 0);
1814
1815 /*
1816 * Set starting attributes.
1817 */
1818 version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
1819 elem = NULL;
1820 while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
1821 /* For the moment we expect all zpl props to be uint64_ts */
1822 uint64_t val;
1823 char *name;
1824
1825 ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
1826 VERIFY(nvpair_value_uint64(elem, &val) == 0);
1827 name = nvpair_name(elem);
1828 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
1829 if (val < version)
1830 version = val;
1831 } else {
1832 error = zap_update(os, moid, name, 8, 1, &val, tx);
1833 }
1834 ASSERT(error == 0);
1835 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
1836 norm = val;
1837 else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
1838 sense = val;
1839 }
1840 ASSERT(version != 0);
1841 error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
1842
1843 /*
1844 * Create zap object used for SA attribute registration
1845 */
1846
1847 if (version >= ZPL_VERSION_SA) {
1848 sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
1849 DMU_OT_NONE, 0, tx);
1850 error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
1851 ASSERT(error == 0);
1852 } else {
1853 sa_obj = 0;
1854 }
1855 /*
1856 * Create a delete queue.
1857 */
1858 obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
1859
1860 error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
1861 ASSERT(error == 0);
1862
1863 /*
1864 * Create root znode. Create minimal znode/inode/zfsvfs/sb
1865 * to allow zfs_mknode to work.
1866 */
1867 vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
1868 vattr.va_mode = S_IFDIR|0755;
1869 vattr.va_uid = crgetuid(cr);
1870 vattr.va_gid = crgetgid(cr);
1871
1872 rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
1873 rootzp->z_moved = 0;
1874 rootzp->z_unlinked = 0;
1875 rootzp->z_atime_dirty = 0;
1876 rootzp->z_is_sa = USE_SA(version, os);
1877 rootzp->z_pflags = 0;
1878
1879 zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
1880 zfsvfs->z_os = os;
1881 zfsvfs->z_parent = zfsvfs;
1882 zfsvfs->z_version = version;
1883 zfsvfs->z_use_fuids = USE_FUIDS(version, os);
1884 zfsvfs->z_use_sa = USE_SA(version, os);
1885 zfsvfs->z_norm = norm;
1886
1887 sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
1888 sb->s_fs_info = zfsvfs;
1889
1890 ZTOI(rootzp)->i_sb = sb;
1891
1892 error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
1893 &zfsvfs->z_attr_table);
1894
1895 ASSERT(error == 0);
1896
1897 /*
1898 * Fold case on file systems that are always or sometimes case
1899 * insensitive.
1900 */
1901 if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
1902 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
1903
1904 mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
1905 list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
1906 offsetof(znode_t, z_link_node));
1907
1908 size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
1909 zfsvfs->z_hold_size = size;
1910 zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
1911 KM_SLEEP);
1912 zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
1913 for (i = 0; i != size; i++) {
1914 avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
1915 sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
1916 mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
1917 }

	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);

	atomic_set(&ZTOI(rootzp)->i_count, 0);
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	for (i = 0; i != size; i++) {
		avl_destroy(&zfsvfs->z_hold_trees[i]);
		mutex_destroy(&zfsvfs->z_hold_locks[i]);
	}

	mutex_destroy(&zfsvfs->z_znodes_lock);

	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
	kmem_free(sb, sizeof (struct super_block));
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
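
/*
 * Illustrative sketch: zfs_create_fs() is meant to run in the syncing
 * context of an objset-creation callback (in-tree, zfs_ioctl.c passes
 * such a callback to dmu_objset_create()).  The shape of a caller, where
 * my_create_cb and my_zplprops are hypothetical names:
 *
 *	static void
 *	my_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
 *	{
 *		zfs_create_fs(os, cr, my_zplprops, tx);
 *	}
 */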
#endif /* _KERNEL */

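/*
 * Look up the SA master node and build the table mapping ZPL attribute
 * ids to registered SA attribute numbers.  On filesystems that predate
 * SA support the ZFS_SA_ATTRS entry is absent; ENOENT is tolerated,
 * sa_obj stays 0, and sa_setup() falls back to the legacy znode layout.
 */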
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}

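/*
 * Hold the SA buffer for an object, verify that its bonus type can back
 * a znode (either DMU_OT_SA or a legacy DMU_OT_ZNODE of sufficient
 * size), and return a private SA handle for it.  On success the caller
 * owns both the handle and the buffer hold; see zfs_release_sa_handle().
 */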
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
		sa_buf_rele(*db, tag);
		return (SET_ERROR(ENOTSUP));
	}

	error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
	if (error != 0) {
		sa_buf_rele(*db, tag);
		return (error);
	}

	return (0);
}

void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
	sa_handle_destroy(hdl);
	sa_buf_rele(db, tag);
}
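
/*
 * zfs_grab_sa_handle() and zfs_release_sa_handle() are used as a strict
 * hold/release pair.  A minimal sketch of the pattern (attr and val are
 * placeholders; this mirrors zfs_obj_to_path() below):
 *
 *	sa_handle_t *hdl;
 *	dmu_buf_t *db;
 *
 *	if ((error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG)) != 0)
 *		return (error);
 *	error = sa_lookup(hdl, attr, &val, sizeof (val));
 *	zfs_release_sa_handle(hdl, db, FTAG);
 *
 * Every successful grab must be balanced by exactly one release with the
 * same tag.
 */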

/*
 * Given an object number, return its parent object number and whether
 * or not the object is an extended attribute directory.
 */
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
    uint64_t *pobjp, int *is_xattrdir)
{
	uint64_t parent;
	uint64_t pflags;
	uint64_t mode;
	uint64_t parent_mode;
	sa_bulk_attr_t bulk[3];
	sa_handle_t *sa_hdl;
	dmu_buf_t *sa_db;
	int count = 0;
	int error;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
	    &parent, sizeof (parent));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
	    &pflags, sizeof (pflags));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &mode, sizeof (mode));

	if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
		return (error);

	/*
	 * When a link is removed, its parent pointer is not changed and so
	 * may become invalid.  There are two cases where a link is removed
	 * but the file stays around: when it goes to the delete queue, and
	 * when there are additional links.  The parent's mode is looked up
	 * below to sanity check the pointer.
	 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.,
	 * so an extended attribute directory's parent may be any object
	 * type.  Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics.
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

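/*
 * Reconstruct an object's path by walking ZPL_PARENT links toward the
 * root.  The path is built right-to-left at the tail of buf, with each
 * discovered component prefixed onto the partial result.  For example,
 * an object at /a/b with len = 8 proceeds as:
 *
 *	start:       [ ? ? ? ? ? ? ? \0 ]	path -> ""
 *	found "b":   [ ? ? ? ? ? / b \0 ]	path -> "/b"
 *	found "a":   [ ? ? ? / a / b \0 ]	path -> "/a/b"
 *
 * and the finished string is memmove()d to the front of buf.
 */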
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	/* An object on the delete queue has no valid path; report ESTALE. */
	uint64_t deleteq_obj;
	VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
	error = zap_lookup_int(osp, deleteq_obj, obj);
	if (error == 0) {
		return (SET_ERROR(ESTALE));
	} else if (error != ENOENT) {
		return (error);
	}
	error = 0;

	for (;;) {
		uint64_t pobj = 0;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir = 0;

		if (prevdb) {
			ASSERT(prevhdl != NULL);
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);
		}

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		bcopy(component, path, complen);
		obj = pobj;

		/*
		 * Keep a reference to the previous handle/buffer; it is
		 * released at the top of the next iteration, or by the
		 * cleanup below should the next hold fail.
		 */
		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}
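
/*
 * Minimal usage sketch (this is how the ZFS_IOC_OBJ_TO_PATH ioctl handler
 * drives it; buf sizing is the caller's choice):
 *
 *	char buf[MAXPATHLEN];
 *
 *	error = zfs_obj_to_path(osp, obj, buf, sizeof (buf));
 *
 * On success buf holds the dataset-relative path, e.g. "/dir/file";
 * ESTALE means the object sits on the delete queue and has no valid path.
 */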

int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);

/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
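
/*
 * Illustrative note: the tunable can be set at module load time, e.g.
 * "modprobe zfs zfs_object_mutex_size=256", or through
 * /sys/module/zfs/parameters/zfs_object_mutex_size.  Because the hold
 * array size is captured when a filesystem is set up, a new value only
 * affects subsequently mounted filesystems.
 */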
#endif