1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 * Copyright 2017 Nexenta Systems, Inc.
27 */
28
29 /* Portions Copyright 2007 Jeremy Teo */
30 /* Portions Copyright 2010 Robert Milkowski */
31
32 #include <sys/param.h>
33 #include <sys/time.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/resource.h>
37 #include <security/mac/mac_framework.h>
38 #include <sys/vfs.h>
39 #include <sys/endian.h>
40 #include <sys/vm.h>
41 #include <sys/vnode.h>
42 #if __FreeBSD_version >= 1300102
43 #include <sys/smr.h>
44 #endif
45 #include <sys/dirent.h>
46 #include <sys/file.h>
47 #include <sys/stat.h>
48 #include <sys/kmem.h>
49 #include <sys/taskq.h>
50 #include <sys/uio.h>
51 #include <sys/atomic.h>
52 #include <sys/namei.h>
53 #include <sys/mman.h>
54 #include <sys/cmn_err.h>
55 #include <sys/kdb.h>
56 #include <sys/sysproto.h>
57 #include <sys/errno.h>
58 #include <sys/unistd.h>
59 #include <sys/zfs_dir.h>
60 #include <sys/zfs_ioctl.h>
61 #include <sys/fs/zfs.h>
62 #include <sys/dmu.h>
63 #include <sys/dmu_objset.h>
64 #include <sys/spa.h>
65 #include <sys/txg.h>
66 #include <sys/dbuf.h>
67 #include <sys/zap.h>
68 #include <sys/sa.h>
69 #include <sys/policy.h>
70 #include <sys/sunddi.h>
71 #include <sys/filio.h>
72 #include <sys/sid.h>
73 #include <sys/zfs_ctldir.h>
74 #include <sys/zfs_fuid.h>
75 #include <sys/zfs_quota.h>
76 #include <sys/zfs_sa.h>
77 #include <sys/zfs_rlock.h>
78 #include <sys/bio.h>
79 #include <sys/buf.h>
80 #include <sys/sched.h>
81 #include <sys/acl.h>
82 #include <sys/vmmeter.h>
83 #include <vm/vm_param.h>
84 #include <sys/zil.h>
85 #include <sys/zfs_vnops.h>
86 #include <sys/module.h>
87 #include <sys/sysent.h>
88 #include <sys/dmu_impl.h>
89 #include <sys/brt.h>
90 #include <sys/zfeature.h>
91
92 #include <vm/vm_object.h>
93
94 #include <sys/extattr.h>
95 #include <sys/priv.h>
96
97 #ifndef VN_OPEN_INVFS
98 #define VN_OPEN_INVFS 0x0
99 #endif
100
101 VFS_SMR_DECLARE;
102
103 #if __FreeBSD_version < 1300103
104 #define NDFREE_PNBUF(ndp) NDFREE((ndp), NDF_ONLY_PNBUF)
105 #endif
106
107 #if __FreeBSD_version >= 1300047
108 #define vm_page_wire_lock(pp)
109 #define vm_page_wire_unlock(pp)
110 #else
111 #define vm_page_wire_lock(pp) vm_page_lock(pp)
112 #define vm_page_wire_unlock(pp) vm_page_unlock(pp)
113 #endif
114
115 #ifdef DEBUG_VFS_LOCKS
116 #define VNCHECKREF(vp) \
117 VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
118 ("%s: wrong ref counts", __func__));
119 #else
120 #define VNCHECKREF(vp)
121 #endif
122
123 #if __FreeBSD_version >= 1400045
124 typedef uint64_t cookie_t;
125 #else
126 typedef ulong_t cookie_t;
127 #endif
128
129 /*
130 * Programming rules.
131 *
132 * Each vnode op performs some logical unit of work. To do this, the ZPL must
133 * properly lock its in-core state, create a DMU transaction, do the work,
134 * record this work in the intent log (ZIL), commit the DMU transaction,
135 * and wait for the intent log to commit if it is a synchronous operation.
136 * Moreover, the vnode ops must work in both normal and log replay context.
137 * The ordering of events is important to avoid deadlocks and references
138 * to freed memory. The example below illustrates the following Big Rules:
139 *
140 * (1) A check must be made in each zfs thread for a mounted file system.
141  *      This is done using zfs_enter(zfsvfs), which avoids races.
142 * A zfs_exit(zfsvfs) is needed before all returns. Any znodes
143 * must be checked with zfs_verify_zp(zp). Both of these macros
144 * can return EIO from the calling function.
145 *
146 * (2) VN_RELE() should always be the last thing except for zil_commit()
147 * (if necessary) and zfs_exit(). This is for 3 reasons:
148 * First, if it's the last reference, the vnode/znode
149 * can be freed, so the zp may point to freed memory. Second, the last
150 * reference will call zfs_zinactive(), which may induce a lot of work --
151 * pushing cached pages (which acquires range locks) and syncing out
152 * cached atime changes. Third, zfs_zinactive() may require a new tx,
153 * which could deadlock the system if you were already holding one.
154 * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
155 *
156 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
157 * as they can span dmu_tx_assign() calls.
158 *
159 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
160 * dmu_tx_assign(). This is critical because we don't want to block
161 * while holding locks.
162 *
163 * If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT. This
164 * reduces lock contention and CPU usage when we must wait (note that if
165 * throughput is constrained by the storage, nearly every transaction
166 * must wait).
167 *
168 * Note, in particular, that if a lock is sometimes acquired before
169 * the tx assigns, and sometimes after (e.g. z_lock), then failing
170 * to use a non-blocking assign can deadlock the system. The scenario:
171 *
172 * Thread A has grabbed a lock before calling dmu_tx_assign().
173 * Thread B is in an already-assigned tx, and blocks for this lock.
174 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
175 * forever, because the previous txg can't quiesce until B's tx commits.
176 *
177 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
178 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
179 * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
180 * to indicate that this operation has already called dmu_tx_wait().
181 * This will ensure that we don't retry forever, waiting a short bit
182 * each time.
183 *
184 * (5) If the operation succeeded, generate the intent log entry for it
185 * before dropping locks. This ensures that the ordering of events
186 * in the intent log matches the order in which they actually occurred.
187 * During ZIL replay the zfs_log_* functions will update the sequence
188 * number to indicate the zil transaction has replayed.
189 *
190 * (6) At the end of each vnode op, the DMU tx must always commit,
191 * regardless of whether there were any errors.
192 *
193 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
194 * to ensure that synchronous semantics are provided when necessary.
195 *
196 * In general, this is how things should be ordered in each vnode op:
197 *
198 * zfs_enter(zfsvfs); // exit if unmounted
199 * top:
200 * zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
201 * rw_enter(...); // grab any other locks you need
202 * tx = dmu_tx_create(...); // get DMU tx
203 * dmu_tx_hold_*(); // hold each object you might modify
204 * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
205 * if (error) {
206 * rw_exit(...); // drop locks
207 * zfs_dirent_unlock(dl); // unlock directory entry
208 * VN_RELE(...); // release held vnodes
209 * if (error == ERESTART) {
210 * waited = B_TRUE;
211 * dmu_tx_wait(tx);
212 * dmu_tx_abort(tx);
213 * goto top;
214 * }
215 * dmu_tx_abort(tx); // abort DMU tx
216 * zfs_exit(zfsvfs); // finished in zfs
217 * return (error); // really out of space
218 * }
219 * error = do_real_work(); // do whatever this VOP does
220 * if (error == 0)
221 * zfs_log_*(...); // on success, make ZIL entry
222 * dmu_tx_commit(tx); // commit DMU tx -- error or not
223 * rw_exit(...); // drop locks
224 * zfs_dirent_unlock(dl); // unlock directory entry
225 * VN_RELE(...); // release held vnodes
226 * zil_commit(zilog, foid); // synchronous when necessary
227 * zfs_exit(zfsvfs); // finished in zfs
228 * return (error); // done, report error
229 */
230 static int
231 zfs_open(vnode_t **vpp, int flag, cred_t *cr)
232 {
233 (void) cr;
234 znode_t *zp = VTOZ(*vpp);
235 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
236 int error;
237
238 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
239 return (error);
240
241 if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
242 ((flag & FAPPEND) == 0)) {
243 zfs_exit(zfsvfs, FTAG);
244 return (SET_ERROR(EPERM));
245 }
246
247 /*
248 * Keep a count of the synchronous opens in the znode. On first
249 * synchronous open we must convert all previous async transactions
250 * into sync to keep correct ordering.
251 */
252 if (flag & O_SYNC) {
253 if (atomic_inc_32_nv(&zp->z_sync_cnt) == 1)
254 zil_async_to_sync(zfsvfs->z_log, zp->z_id);
255 }
256
257 zfs_exit(zfsvfs, FTAG);
258 return (0);
259 }
260
261 static int
262 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
263 {
264 (void) offset, (void) cr;
265 znode_t *zp = VTOZ(vp);
266 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
267 int error;
268
269 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
270 return (error);
271
272 /* Decrement the synchronous opens in the znode */
273 if ((flag & O_SYNC) && (count == 1))
274 atomic_dec_32(&zp->z_sync_cnt);
275
276 zfs_exit(zfsvfs, FTAG);
277 return (0);
278 }
279
280 static int
281 zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
282 int *rvalp)
283 {
284 (void) flag, (void) cred, (void) rvalp;
285 loff_t off;
286 int error;
287
288 switch (com) {
289 case _FIOFFS:
290 {
291 return (0);
292
293 /*
294  * The following two ioctls are used by bfu.  Faking them out
295  * is necessary to avoid bfu errors.
296 */
297 }
298 case _FIOGDIO:
299 case _FIOSDIO:
300 {
301 return (0);
302 }
303
304 case F_SEEK_DATA:
305 case F_SEEK_HOLE:
306 {
307 off = *(offset_t *)data;
308 /* offset parameter is in/out */
309 error = zfs_holey(VTOZ(vp), com, &off);
310 if (error)
311 return (error);
312 *(offset_t *)data = off;
313 return (0);
314 }
315 }
316 return (SET_ERROR(ENOTTY));
317 }
318
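/*
 * Illustrative sketch only (not compiled): the F_SEEK_DATA/F_SEEK_HOLE
 * cases above treat the ioctl data argument as a pointer to an in/out
 * offset_t -- the caller passes a starting offset and, on success, reads
 * back the next data or hole offset found by zfs_holey(). The helper
 * below is hypothetical.
 */
#if 0
static int
example_next_data(vnode_t *vp, cred_t *cr, offset_t *offp)
{
        /* *offp is updated in place by zfs_ioctl() on success. */
        return (zfs_ioctl(vp, F_SEEK_DATA, (intptr_t)offp, 0, cr, NULL));
}
#endif
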
319 static vm_page_t
320 page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
321 {
322 vm_object_t obj;
323 vm_page_t pp;
324 int64_t end;
325
326 /*
327 * At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
328 * aligned boundaries, if the range is not aligned. As a result a
329 * DEV_BSIZE subrange with partially dirty data may get marked as clean.
330 * It may happen that all DEV_BSIZE subranges are marked clean and thus
331  * the whole page would be considered clean despite having some
332 * dirty data.
333 * For this reason we should shrink the range to DEV_BSIZE aligned
334 * boundaries before calling vm_page_clear_dirty.
335 */
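/*
 * Worked example (assuming DEV_BSIZE == 512): for off = 100 and
 * nbytes = 1000, end = rounddown2(1100, 512) = 1024 and
 * off = roundup2(100, 512) = 512, so only the fully covered
 * subrange [512, 1024) is cleared below.
 */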
336 end = rounddown2(off + nbytes, DEV_BSIZE);
337 off = roundup2(off, DEV_BSIZE);
338 nbytes = end - off;
339
340 obj = vp->v_object;
341 zfs_vmobject_assert_wlocked_12(obj);
342 #if __FreeBSD_version < 1300050
343 for (;;) {
344 if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
345 pp->valid) {
346 if (vm_page_xbusied(pp)) {
347 /*
348 * Reference the page before unlocking and
349 * sleeping so that the page daemon is less
350 * likely to reclaim it.
351 */
352 vm_page_reference(pp);
353 vm_page_lock(pp);
354 zfs_vmobject_wunlock(obj);
355 vm_page_busy_sleep(pp, "zfsmwb", true);
356 zfs_vmobject_wlock(obj);
357 continue;
358 }
359 vm_page_sbusy(pp);
360 } else if (pp != NULL) {
361 ASSERT(!pp->valid);
362 pp = NULL;
363 }
364 if (pp != NULL) {
365 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
366 vm_object_pip_add(obj, 1);
367 pmap_remove_write(pp);
368 if (nbytes != 0)
369 vm_page_clear_dirty(pp, off, nbytes);
370 }
371 break;
372 }
373 #else
374 vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
375 VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
376 VM_ALLOC_IGN_SBUSY);
377 if (pp != NULL) {
378 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
379 vm_object_pip_add(obj, 1);
380 pmap_remove_write(pp);
381 if (nbytes != 0)
382 vm_page_clear_dirty(pp, off, nbytes);
383 }
384 #endif
385 return (pp);
386 }
387
388 static void
389 page_unbusy(vm_page_t pp)
390 {
391
392 vm_page_sunbusy(pp);
393 #if __FreeBSD_version >= 1300041
394 vm_object_pip_wakeup(pp->object);
395 #else
396 vm_object_pip_subtract(pp->object, 1);
397 #endif
398 }
399
400 #if __FreeBSD_version > 1300051
401 static vm_page_t
402 page_hold(vnode_t *vp, int64_t start)
403 {
404 vm_object_t obj;
405 vm_page_t m;
406
407 obj = vp->v_object;
408 vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
409 VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
410 VM_ALLOC_NOBUSY);
411 return (m);
412 }
413 #else
414 static vm_page_t
415 page_hold(vnode_t *vp, int64_t start)
416 {
417 vm_object_t obj;
418 vm_page_t pp;
419
420 obj = vp->v_object;
421 zfs_vmobject_assert_wlocked(obj);
422
423 for (;;) {
424 if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
425 pp->valid) {
426 if (vm_page_xbusied(pp)) {
427 /*
428 * Reference the page before unlocking and
429 * sleeping so that the page daemon is less
430 * likely to reclaim it.
431 */
432 vm_page_reference(pp);
433 vm_page_lock(pp);
434 zfs_vmobject_wunlock(obj);
435 vm_page_busy_sleep(pp, "zfsmwb", true);
436 zfs_vmobject_wlock(obj);
437 continue;
438 }
439
440 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
441 vm_page_wire_lock(pp);
442 vm_page_hold(pp);
443 vm_page_wire_unlock(pp);
444
445 } else
446 pp = NULL;
447 break;
448 }
449 return (pp);
450 }
451 #endif
452
453 static void
454 page_unhold(vm_page_t pp)
455 {
456
457 vm_page_wire_lock(pp);
458 #if __FreeBSD_version >= 1300035
459 vm_page_unwire(pp, PQ_ACTIVE);
460 #else
461 vm_page_unhold(pp);
462 #endif
463 vm_page_wire_unlock(pp);
464 }
465
466 /*
467 * When a file is memory mapped, we must keep the IO data synchronized
468 * between the DMU cache and the memory mapped pages. What this means:
469 *
470 * On Write: If we find a memory mapped page, we write to *both*
471 * the page and the dmu buffer.
472 */
473 void
474 update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
475 {
476 vm_object_t obj;
477 struct sf_buf *sf;
478 vnode_t *vp = ZTOV(zp);
479 caddr_t va;
480 int off;
481
482 ASSERT3P(vp->v_mount, !=, NULL);
483 obj = vp->v_object;
484 ASSERT3P(obj, !=, NULL);
485
486 off = start & PAGEOFFSET;
487 zfs_vmobject_wlock_12(obj);
488 #if __FreeBSD_version >= 1300041
489 vm_object_pip_add(obj, 1);
490 #endif
491 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
492 vm_page_t pp;
493 int nbytes = imin(PAGESIZE - off, len);
494
495 if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
496 zfs_vmobject_wunlock_12(obj);
497
498 va = zfs_map_page(pp, &sf);
499 (void) dmu_read(os, zp->z_id, start + off, nbytes,
500 va + off, DMU_READ_PREFETCH);
501 zfs_unmap_page(sf);
502
503 zfs_vmobject_wlock_12(obj);
504 page_unbusy(pp);
505 }
506 len -= nbytes;
507 off = 0;
508 }
509 #if __FreeBSD_version >= 1300041
510 vm_object_pip_wakeup(obj);
511 #else
512 vm_object_pip_wakeupn(obj, 0);
513 #endif
514 zfs_vmobject_wunlock_12(obj);
515 }
516
517 /*
518 * Read with UIO_NOCOPY flag means that sendfile(2) requests
519 * ZFS to populate a range of page cache pages with data.
520 *
521 * NOTE: this function could be optimized to pre-allocate
522 * all pages in advance, drain exclusive busy on all of them,
523  * map them into a contiguous KVA region and populate them
524  * in a single dmu_read() call.
525 */
526 int
527 mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
528 {
529 vnode_t *vp = ZTOV(zp);
530 objset_t *os = zp->z_zfsvfs->z_os;
531 struct sf_buf *sf;
532 vm_object_t obj;
533 vm_page_t pp;
534 int64_t start;
535 caddr_t va;
536 int len = nbytes;
537 int error = 0;
538
539 ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
540 ASSERT3P(vp->v_mount, !=, NULL);
541 obj = vp->v_object;
542 ASSERT3P(obj, !=, NULL);
543 ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
544
545 zfs_vmobject_wlock_12(obj);
546 for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
547 int bytes = MIN(PAGESIZE, len);
548
549 pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
550 VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
551 if (vm_page_none_valid(pp)) {
552 zfs_vmobject_wunlock_12(obj);
553 va = zfs_map_page(pp, &sf);
554 error = dmu_read(os, zp->z_id, start, bytes, va,
555 DMU_READ_PREFETCH);
556 if (bytes != PAGESIZE && error == 0)
557 memset(va + bytes, 0, PAGESIZE - bytes);
558 zfs_unmap_page(sf);
559 zfs_vmobject_wlock_12(obj);
560 #if __FreeBSD_version >= 1300081
561 if (error == 0) {
562 vm_page_valid(pp);
563 vm_page_activate(pp);
564 vm_page_do_sunbusy(pp);
565 } else {
566 zfs_vmobject_wlock(obj);
567 if (!vm_page_wired(pp) && pp->valid == 0 &&
568 vm_page_busy_tryupgrade(pp))
569 vm_page_free(pp);
570 else
571 vm_page_sunbusy(pp);
572 zfs_vmobject_wunlock(obj);
573 }
574 #else
575 vm_page_do_sunbusy(pp);
576 vm_page_lock(pp);
577 if (error) {
578 if (pp->wire_count == 0 && pp->valid == 0 &&
579 !vm_page_busied(pp))
580 vm_page_free(pp);
581 } else {
582 pp->valid = VM_PAGE_BITS_ALL;
583 vm_page_activate(pp);
584 }
585 vm_page_unlock(pp);
586 #endif
587 } else {
588 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
589 vm_page_do_sunbusy(pp);
590 }
591 if (error)
592 break;
593 zfs_uio_advance(uio, bytes);
594 len -= bytes;
595 }
596 zfs_vmobject_wunlock_12(obj);
597 return (error);
598 }
599
600 /*
601 * When a file is memory mapped, we must keep the IO data synchronized
602 * between the DMU cache and the memory mapped pages. What this means:
603 *
604 * On Read: We "read" preferentially from memory mapped pages,
605  * otherwise we fall back to the dmu buffer.
606 *
607 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
608 * the file is memory mapped.
609 */
610 int
611 mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
612 {
613 vnode_t *vp = ZTOV(zp);
614 vm_object_t obj;
615 int64_t start;
616 int len = nbytes;
617 int off;
618 int error = 0;
619
620 ASSERT3P(vp->v_mount, !=, NULL);
621 obj = vp->v_object;
622 ASSERT3P(obj, !=, NULL);
623
624 start = zfs_uio_offset(uio);
625 off = start & PAGEOFFSET;
626 zfs_vmobject_wlock_12(obj);
627 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
628 vm_page_t pp;
629 uint64_t bytes = MIN(PAGESIZE - off, len);
630
631 if ((pp = page_hold(vp, start))) {
632 struct sf_buf *sf;
633 caddr_t va;
634
635 zfs_vmobject_wunlock_12(obj);
636 va = zfs_map_page(pp, &sf);
637 error = vn_io_fault_uiomove(va + off, bytes,
638 GET_UIO_STRUCT(uio));
639 zfs_unmap_page(sf);
640 zfs_vmobject_wlock_12(obj);
641 page_unhold(pp);
642 } else {
643 zfs_vmobject_wunlock_12(obj);
644 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
645 uio, bytes);
646 zfs_vmobject_wlock_12(obj);
647 }
648 len -= bytes;
649 off = 0;
650 if (error)
651 break;
652 }
653 zfs_vmobject_wunlock_12(obj);
654 return (error);
655 }
656
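/*
 * Synchronous write helper: write len bytes from data at offset pos via
 * vn_rdwr() with IO_SYNC. If presid is NULL, a short write is reported
 * as EIO; otherwise the residual byte count is returned through *presid.
 */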
657 int
658 zfs_write_simple(znode_t *zp, const void *data, size_t len,
659 loff_t pos, size_t *presid)
660 {
661 int error = 0;
662 ssize_t resid;
663
664 error = vn_rdwr(UIO_WRITE, ZTOV(zp), __DECONST(void *, data), len, pos,
665 UIO_SYSSPACE, IO_SYNC, kcred, NOCRED, &resid, curthread);
666
667 if (error) {
668 return (SET_ERROR(error));
669 } else if (presid == NULL) {
670 if (resid != 0) {
671 error = SET_ERROR(EIO);
672 }
673 } else {
674 *presid = resid;
675 }
676 return (error);
677 }
678
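/*
 * Release a znode's vnode asynchronously on the objset's zrele taskq.
 * Per Big Rule (2) above, this is the variant to use when the release may
 * happen inside an assigned transaction, since dropping the last reference
 * can trigger zfs_zinactive() and require a new tx.
 */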
679 void
680 zfs_zrele_async(znode_t *zp)
681 {
682 vnode_t *vp = ZTOV(zp);
683 objset_t *os = ITOZSB(vp)->z_os;
684
685 VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
686 }
687
688 static int
689 zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
690 {
691 int error;
692
693 *vpp = arg;
694 error = vn_lock(*vpp, lkflags);
695 if (error != 0)
696 vrele(*vpp);
697 return (error);
698 }
699
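/*
 * Lock the vnode returned by a lookup, given the already-locked directory
 * vnode. The "." case relocks dvp itself, the ".." case defers to
 * vn_vget_ino_gen() to handle the tricky unlock/relock of dvp, and the
 * common case simply locks the looked-up vnode.
 */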
700 static int
701 zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
702 {
703 znode_t *zdp = VTOZ(dvp);
704 zfsvfs_t *zfsvfs __unused = zdp->z_zfsvfs;
705 int error;
706 int ltype;
707
708 if (zfsvfs->z_replay == B_FALSE)
709 ASSERT_VOP_LOCKED(dvp, __func__);
710
711 if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
712 ASSERT3P(dvp, ==, vp);
713 vref(dvp);
714 ltype = lkflags & LK_TYPE_MASK;
715 if (ltype != VOP_ISLOCKED(dvp)) {
716 if (ltype == LK_EXCLUSIVE)
717 vn_lock(dvp, LK_UPGRADE | LK_RETRY);
718 else /* if (ltype == LK_SHARED) */
719 vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
720
721 /*
722  * Relocking for the "." case could leave us with a
723  * reclaimed vnode.
724 */
725 if (VN_IS_DOOMED(dvp)) {
726 vrele(dvp);
727 return (SET_ERROR(ENOENT));
728 }
729 }
730 return (0);
731 } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
732 /*
733 * Note that in this case, dvp is the child vnode, and we
734  * are looking up the parent vnode - exactly the reverse of
735 * normal operation. Unlocking dvp requires some rather
736 * tricky unlock/relock dance to prevent mp from being freed;
737 * use vn_vget_ino_gen() which takes care of all that.
738 *
739 * XXX Note that there is a time window when both vnodes are
740 * unlocked. It is possible, although highly unlikely, that
741 * during that window the parent-child relationship between
742 * the vnodes may change, for example, get reversed.
743 * In that case we would have a wrong lock order for the vnodes.
744 * All other filesystems seem to ignore this problem, so we
745 * do the same here.
746 * A potential solution could be implemented as follows:
747 * - using LK_NOWAIT when locking the second vnode and retrying
748 * if necessary
749 * - checking that the parent-child relationship still holds
750 * after locking both vnodes and retrying if it doesn't
751 */
752 error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
753 return (error);
754 } else {
755 error = vn_lock(vp, lkflags);
756 if (error != 0)
757 vrele(vp);
758 return (error);
759 }
760 }
761
762 /*
763 * Lookup an entry in a directory, or an extended attribute directory.
764 * If it exists, return a held vnode reference for it.
765 *
766 * IN: dvp - vnode of directory to search.
767 * nm - name of entry to lookup.
768 * pnp - full pathname to lookup [UNUSED].
769 * flags - LOOKUP_XATTR set if looking for an attribute.
770 * rdir - root directory vnode [UNUSED].
771 * cr - credentials of caller.
772 * ct - caller context
773 *
774 * OUT: vpp - vnode of located entry, NULL if not found.
775 *
776 * RETURN: 0 on success, error code on failure.
777 *
778 * Timestamps:
779 * NA
780 */
781 static int
782 zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
783 struct componentname *cnp, int nameiop, cred_t *cr, int flags,
784 boolean_t cached)
785 {
786 znode_t *zdp = VTOZ(dvp);
787 znode_t *zp;
788 zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
789 #if __FreeBSD_version > 1300124
790 seqc_t dvp_seqc;
791 #endif
792 int error = 0;
793
794 /*
795 * Fast path lookup, however we must skip DNLC lookup
796 * for case folding or normalizing lookups because the
797 * DNLC code only stores the passed in name. This means
798 * creating 'a' and removing 'A' on a case insensitive
799 * file system would work, but DNLC still thinks 'a'
800 * exists and won't let you create it again on the next
801 * pass through fast path.
802 */
803 if (!(flags & LOOKUP_XATTR)) {
804 if (dvp->v_type != VDIR) {
805 return (SET_ERROR(ENOTDIR));
806 } else if (zdp->z_sa_hdl == NULL) {
807 return (SET_ERROR(EIO));
808 }
809 }
810
811 DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp,
812 const char *, nm);
813
814 if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
815 return (error);
816
817 #if __FreeBSD_version > 1300124
818 dvp_seqc = vn_seqc_read_notmodify(dvp);
819 #endif
820
821 *vpp = NULL;
822
823 if (flags & LOOKUP_XATTR) {
824 /*
825 * If the xattr property is off, refuse the lookup request.
826 */
827 if (!(zfsvfs->z_flags & ZSB_XATTR)) {
828 zfs_exit(zfsvfs, FTAG);
829 return (SET_ERROR(EOPNOTSUPP));
830 }
831
832 /*
833  * We don't allow recursive attributes.
834 * Maybe someday we will.
835 */
836 if (zdp->z_pflags & ZFS_XATTR) {
837 zfs_exit(zfsvfs, FTAG);
838 return (SET_ERROR(EINVAL));
839 }
840
841 if ((error = zfs_get_xattrdir(VTOZ(dvp), &zp, cr, flags))) {
842 zfs_exit(zfsvfs, FTAG);
843 return (error);
844 }
845 *vpp = ZTOV(zp);
846
847 /*
848 * Do we have permission to get into attribute directory?
849 */
850 error = zfs_zaccess(zp, ACE_EXECUTE, 0, B_FALSE, cr, NULL);
851 if (error) {
852 vrele(ZTOV(zp));
853 }
854
855 zfs_exit(zfsvfs, FTAG);
856 return (error);
857 }
858
859 /*
860 * Check accessibility of directory if we're not coming in via
861 * VOP_CACHEDLOOKUP.
862 */
863 if (!cached) {
864 #ifdef NOEXECCHECK
865 if ((cnp->cn_flags & NOEXECCHECK) != 0) {
866 cnp->cn_flags &= ~NOEXECCHECK;
867 } else
868 #endif
869 if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
870 NULL))) {
871 zfs_exit(zfsvfs, FTAG);
872 return (error);
873 }
874 }
875
876 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
877 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
878 zfs_exit(zfsvfs, FTAG);
879 return (SET_ERROR(EILSEQ));
880 }
881
882
883 /*
884 * First handle the special cases.
885 */
886 if ((cnp->cn_flags & ISDOTDOT) != 0) {
887 /*
888 * If we are a snapshot mounted under .zfs, return
889 * the vp for the snapshot directory.
890 */
891 if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
892 struct componentname cn;
893 vnode_t *zfsctl_vp;
894 int ltype;
895
896 zfs_exit(zfsvfs, FTAG);
897 ltype = VOP_ISLOCKED(dvp);
898 VOP_UNLOCK1(dvp);
899 error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
900 &zfsctl_vp);
901 if (error == 0) {
902 cn.cn_nameptr = "snapshot";
903 cn.cn_namelen = strlen(cn.cn_nameptr);
904 cn.cn_nameiop = cnp->cn_nameiop;
905 cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
906 cn.cn_lkflags = cnp->cn_lkflags;
907 error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
908 vput(zfsctl_vp);
909 }
910 vn_lock(dvp, ltype | LK_RETRY);
911 return (error);
912 }
913 }
914 if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
915 zfs_exit(zfsvfs, FTAG);
916 if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
917 return (SET_ERROR(ENOTSUP));
918 error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
919 return (error);
920 }
921
922 /*
923  * The loop retries the lookup if the parent-child relationship
924  * changes during the dot-dot locking complexities.
925 */
926 for (;;) {
927 uint64_t parent;
928
929 error = zfs_dirlook(zdp, nm, &zp);
930 if (error == 0)
931 *vpp = ZTOV(zp);
932
933 zfs_exit(zfsvfs, FTAG);
934 if (error != 0)
935 break;
936
937 error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
938 if (error != 0) {
939 /*
940 * If we've got a locking error, then the vnode
941 * got reclaimed because of a force unmount.
942 * We never enter doomed vnodes into the name cache.
943 */
944 *vpp = NULL;
945 return (error);
946 }
947
948 if ((cnp->cn_flags & ISDOTDOT) == 0)
949 break;
950
951 if ((error = zfs_enter(zfsvfs, FTAG)) != 0) {
952 vput(ZTOV(zp));
953 *vpp = NULL;
954 return (error);
955 }
956 if (zdp->z_sa_hdl == NULL) {
957 error = SET_ERROR(EIO);
958 } else {
959 error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
960 &parent, sizeof (parent));
961 }
962 if (error != 0) {
963 zfs_exit(zfsvfs, FTAG);
964 vput(ZTOV(zp));
965 break;
966 }
967 if (zp->z_id == parent) {
968 zfs_exit(zfsvfs, FTAG);
969 break;
970 }
971 vput(ZTOV(zp));
972 }
973
974 if (error != 0)
975 *vpp = NULL;
976
977 /* Translate errors and add SAVENAME when needed. */
978 if (cnp->cn_flags & ISLASTCN) {
979 switch (nameiop) {
980 case CREATE:
981 case RENAME:
982 if (error == ENOENT) {
983 error = EJUSTRETURN;
984 #if __FreeBSD_version < 1400068
985 cnp->cn_flags |= SAVENAME;
986 #endif
987 break;
988 }
989 zfs_fallthrough;
990 case DELETE:
991 #if __FreeBSD_version < 1400068
992 if (error == 0)
993 cnp->cn_flags |= SAVENAME;
994 #endif
995 break;
996 }
997 }
998
999 #if __FreeBSD_version > 1300124
1000 if ((cnp->cn_flags & ISDOTDOT) != 0) {
1001 /*
1002 * FIXME: zfs_lookup_lock relocks vnodes and does nothing to
1003 * handle races. In particular different callers may end up
1004 * with different vnodes and will try to add conflicting
1005 * entries to the namecache.
1006 *
1007  * While finding a different result may be acceptable in the face
1008  * of concurrent modification, adding conflicting entries
1009 * trips over an assert in the namecache.
1010 *
1011 * Ultimately let an entry through once everything settles.
1012 */
1013 if (!vn_seqc_consistent(dvp, dvp_seqc)) {
1014 cnp->cn_flags &= ~MAKEENTRY;
1015 }
1016 }
1017 #endif
1018
1019 /* Insert name into cache (as non-existent) if appropriate. */
1020 if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
1021 error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
1022 cache_enter(dvp, NULL, cnp);
1023
1024 /* Insert name into cache if appropriate. */
1025 if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
1026 error == 0 && (cnp->cn_flags & MAKEENTRY)) {
1027 if (!(cnp->cn_flags & ISLASTCN) ||
1028 (nameiop != DELETE && nameiop != RENAME)) {
1029 cache_enter(dvp, *vpp, cnp);
1030 }
1031 }
1032
1033 return (error);
1034 }
1035
1036 /*
1037 * Attempt to create a new entry in a directory. If the entry
1038 * already exists, truncate the file if permissible, else return
1039 * an error. Return the vp of the created or trunc'd file.
1040 *
1041 * IN: dvp - vnode of directory to put new file entry in.
1042 * name - name of new file entry.
1043 * vap - attributes of new file.
1044 * excl - flag indicating exclusive or non-exclusive mode.
1045 * mode - mode to open file with.
1046 * cr - credentials of caller.
1047 * flag - large file flag [UNUSED].
1048 * ct - caller context
1049 * vsecp - ACL to be set
1050 * mnt_ns - Unused on FreeBSD
1051 *
1052 * OUT: vpp - vnode of created or trunc'd entry.
1053 *
1054 * RETURN: 0 on success, error code on failure.
1055 *
1056 * Timestamps:
1057 * dvp - ctime|mtime updated if new entry created
1058 * vp - ctime|mtime always, atime if new
1059 */
1060 int
1061 zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
1062 znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, zidmap_t *mnt_ns)
1063 {
1064 (void) excl, (void) mode, (void) flag;
1065 znode_t *zp;
1066 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1067 zilog_t *zilog;
1068 objset_t *os;
1069 dmu_tx_t *tx;
1070 int error;
1071 uid_t uid = crgetuid(cr);
1072 gid_t gid = crgetgid(cr);
1073 uint64_t projid = ZFS_DEFAULT_PROJID;
1074 zfs_acl_ids_t acl_ids;
1075 boolean_t fuid_dirtied;
1076 uint64_t txtype;
1077 #ifdef DEBUG_VFS_LOCKS
1078 vnode_t *dvp = ZTOV(dzp);
1079 #endif
1080
1081 /*
1082 * If we have an ephemeral id, ACL, or XVATTR then
1083  * make sure the file system is at the proper version
1084 */
1085 if (zfsvfs->z_use_fuids == B_FALSE &&
1086 (vsecp || (vap->va_mask & AT_XVATTR) ||
1087 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1088 return (SET_ERROR(EINVAL));
1089
1090 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
1091 return (error);
1092 os = zfsvfs->z_os;
1093 zilog = zfsvfs->z_log;
1094
1095 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1096 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1097 zfs_exit(zfsvfs, FTAG);
1098 return (SET_ERROR(EILSEQ));
1099 }
1100
1101 if (vap->va_mask & AT_XVATTR) {
1102 if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
1103 crgetuid(cr), cr, vap->va_type)) != 0) {
1104 zfs_exit(zfsvfs, FTAG);
1105 return (error);
1106 }
1107 }
1108
1109 *zpp = NULL;
1110
1111 if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
1112 vap->va_mode &= ~S_ISVTX;
1113
1114 error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
1115 if (error) {
1116 zfs_exit(zfsvfs, FTAG);
1117 return (error);
1118 }
1119 ASSERT3P(zp, ==, NULL);
1120
1121 /*
1122 * Create a new file object and update the directory
1123 * to reference it.
1124 */
1125 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
1126 goto out;
1127 }
1128
1129 /*
1130 * We only support the creation of regular files in
1131 * extended attribute directories.
1132 */
1133
1134 if ((dzp->z_pflags & ZFS_XATTR) &&
1135 (vap->va_type != VREG)) {
1136 error = SET_ERROR(EINVAL);
1137 goto out;
1138 }
1139
1140 if ((error = zfs_acl_ids_create(dzp, 0, vap,
1141 cr, vsecp, &acl_ids, NULL)) != 0)
1142 goto out;
1143
1144 if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
1145 projid = zfs_inherit_projid(dzp);
1146 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
1147 zfs_acl_ids_free(&acl_ids);
1148 error = SET_ERROR(EDQUOT);
1149 goto out;
1150 }
1151
1152 getnewvnode_reserve_();
1153
1154 tx = dmu_tx_create(os);
1155
1156 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1157 ZFS_SA_BASE_ATTR_SIZE);
1158
1159 fuid_dirtied = zfsvfs->z_fuid_dirty;
1160 if (fuid_dirtied)
1161 zfs_fuid_txhold(zfsvfs, tx);
1162 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1163 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1164 if (!zfsvfs->z_use_sa &&
1165 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1166 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1167 0, acl_ids.z_aclp->z_acl_bytes);
1168 }
1169 error = dmu_tx_assign(tx, TXG_WAIT);
1170 if (error) {
1171 zfs_acl_ids_free(&acl_ids);
1172 dmu_tx_abort(tx);
1173 getnewvnode_drop_reserve();
1174 zfs_exit(zfsvfs, FTAG);
1175 return (error);
1176 }
1177 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1178 if (fuid_dirtied)
1179 zfs_fuid_sync(zfsvfs, tx);
1180
1181 (void) zfs_link_create(dzp, name, zp, tx, ZNEW);
1182 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1183 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1184 vsecp, acl_ids.z_fuidp, vap);
1185 zfs_acl_ids_free(&acl_ids);
1186 dmu_tx_commit(tx);
1187
1188 getnewvnode_drop_reserve();
1189
1190 out:
1191 VNCHECKREF(dvp);
1192 if (error == 0) {
1193 *zpp = zp;
1194 }
1195
1196 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1197 zil_commit(zilog, 0);
1198
1199 zfs_exit(zfsvfs, FTAG);
1200 return (error);
1201 }
1202
1203 /*
1204 * Remove an entry from a directory.
1205 *
1206 * IN: dvp - vnode of directory to remove entry from.
1207 * name - name of entry to remove.
1208 * cr - credentials of caller.
1209 * ct - caller context
1210 * flags - case flags
1211 *
1212 * RETURN: 0 on success, error code on failure.
1213 *
1214 * Timestamps:
1215 * dvp - ctime|mtime
1216 * vp - ctime (if nlink > 0)
1217 */
1218 static int
1219 zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
1220 {
1221 znode_t *dzp = VTOZ(dvp);
1222 znode_t *zp;
1223 znode_t *xzp;
1224 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1225 zilog_t *zilog;
1226 uint64_t xattr_obj;
1227 uint64_t obj = 0;
1228 dmu_tx_t *tx;
1229 boolean_t unlinked;
1230 uint64_t txtype;
1231 int error;
1232
1233
1234 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
1235 return (error);
1236 zp = VTOZ(vp);
1237 if ((error = zfs_verify_zp(zp)) != 0) {
1238 zfs_exit(zfsvfs, FTAG);
1239 return (error);
1240 }
1241 zilog = zfsvfs->z_log;
1242
1243 xattr_obj = 0;
1244 xzp = NULL;
1245
1246 if ((error = zfs_zaccess_delete(dzp, zp, cr, NULL))) {
1247 goto out;
1248 }
1249
1250 /*
1251 * Need to use rmdir for removing directories.
1252 */
1253 if (vp->v_type == VDIR) {
1254 error = SET_ERROR(EPERM);
1255 goto out;
1256 }
1257
1258 vnevent_remove(vp, dvp, name, ct);
1259
1260 obj = zp->z_id;
1261
1262 /* are there any extended attributes? */
1263 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1264 &xattr_obj, sizeof (xattr_obj));
1265 if (error == 0 && xattr_obj) {
1266 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1267 ASSERT0(error);
1268 }
1269
1270 /*
1271 * We may delete the znode now, or we may put it in the unlinked set;
1272 * it depends on whether we're the last link, and on whether there are
1273 * other holds on the vnode. So we dmu_tx_hold() the right things to
1274 * allow for either case.
1275 */
1276 tx = dmu_tx_create(zfsvfs->z_os);
1277 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1278 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1279 zfs_sa_upgrade_txholds(tx, zp);
1280 zfs_sa_upgrade_txholds(tx, dzp);
1281
1282 if (xzp) {
1283 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1284 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1285 }
1286
1287 /* charge as an update -- would be nice not to charge at all */
1288 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1289
1290 /*
1291 * Mark this transaction as typically resulting in a net free of space
1292 */
1293 dmu_tx_mark_netfree(tx);
1294
1295 error = dmu_tx_assign(tx, TXG_WAIT);
1296 if (error) {
1297 dmu_tx_abort(tx);
1298 zfs_exit(zfsvfs, FTAG);
1299 return (error);
1300 }
1301
1302 /*
1303 * Remove the directory entry.
1304 */
1305 error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
1306
1307 if (error) {
1308 dmu_tx_commit(tx);
1309 goto out;
1310 }
1311
1312 if (unlinked) {
1313 zfs_unlinked_add(zp, tx);
1314 vp->v_vflag |= VV_NOSYNC;
1315 }
1316 /* XXX check changes to linux vnops */
1317 txtype = TX_REMOVE;
1318 zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
1319
1320 dmu_tx_commit(tx);
1321 out:
1322
1323 if (xzp)
1324 vrele(ZTOV(xzp));
1325
1326 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1327 zil_commit(zilog, 0);
1328
1329
1330 zfs_exit(zfsvfs, FTAG);
1331 return (error);
1332 }
1333
1334
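/*
 * Helper for zfs_remove() and zfs_rmdir(): build a componentname for name
 * and perform a locked lookup of it in dzp, going through the name cache
 * via vfs_cache_lookup() when the name cache is enabled for this dataset.
 */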
1335 static int
1336 zfs_lookup_internal(znode_t *dzp, const char *name, vnode_t **vpp,
1337 struct componentname *cnp, int nameiop)
1338 {
1339 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1340 int error;
1341
1342 cnp->cn_nameptr = __DECONST(char *, name);
1343 cnp->cn_namelen = strlen(name);
1344 cnp->cn_nameiop = nameiop;
1345 cnp->cn_flags = ISLASTCN;
1346 #if __FreeBSD_version < 1400068
1347 cnp->cn_flags |= SAVENAME;
1348 #endif
1349 cnp->cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
1350 cnp->cn_cred = kcred;
1351 #if __FreeBSD_version < 1400037
1352 cnp->cn_thread = curthread;
1353 #endif
1354
1355 if (zfsvfs->z_use_namecache && !zfsvfs->z_replay) {
1356 struct vop_lookup_args a;
1357
1358 a.a_gen.a_desc = &vop_lookup_desc;
1359 a.a_dvp = ZTOV(dzp);
1360 a.a_vpp = vpp;
1361 a.a_cnp = cnp;
1362 error = vfs_cache_lookup(&a);
1363 } else {
1364 error = zfs_lookup(ZTOV(dzp), name, vpp, cnp, nameiop, kcred, 0,
1365 B_FALSE);
1366 }
1367 #ifdef ZFS_DEBUG
1368 if (error) {
1369 printf("got error %d on name %s on op %d\n", error, name,
1370 nameiop);
1371 kdb_backtrace();
1372 }
1373 #endif
1374 return (error);
1375 }
1376
1377 int
1378 zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
1379 {
1380 vnode_t *vp;
1381 int error;
1382 struct componentname cn;
1383
1384 if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
1385 return (error);
1386
1387 error = zfs_remove_(ZTOV(dzp), vp, name, cr);
1388 vput(vp);
1389 return (error);
1390 }
1391 /*
1392 * Create a new directory and insert it into dvp using the name
1393 * provided. Return a pointer to the inserted directory.
1394 *
1395 * IN: dvp - vnode of directory to add subdir to.
1396 * dirname - name of new directory.
1397 * vap - attributes of new directory.
1398 * cr - credentials of caller.
1399 * ct - caller context
1400 * flags - case flags
1401 * vsecp - ACL to be set
1402 * mnt_ns - Unused on FreeBSD
1403 *
1404 * OUT: vpp - vnode of created directory.
1405 *
1406 * RETURN: 0 on success, error code on failure.
1407 *
1408 * Timestamps:
1409 * dvp - ctime|mtime updated
1410 * vp - ctime|mtime|atime updated
1411 */
1412 int
1413 zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
1414 cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
1415 {
1416 (void) flags, (void) vsecp;
1417 znode_t *zp;
1418 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1419 zilog_t *zilog;
1420 uint64_t txtype;
1421 dmu_tx_t *tx;
1422 int error;
1423 uid_t uid = crgetuid(cr);
1424 gid_t gid = crgetgid(cr);
1425 zfs_acl_ids_t acl_ids;
1426 boolean_t fuid_dirtied;
1427
1428 ASSERT3U(vap->va_type, ==, VDIR);
1429
1430 /*
1431 * If we have an ephemeral id, ACL, or XVATTR then
1432  * make sure the file system is at the proper version
1433 */
1434 if (zfsvfs->z_use_fuids == B_FALSE &&
1435 ((vap->va_mask & AT_XVATTR) ||
1436 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1437 return (SET_ERROR(EINVAL));
1438
1439 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
1440 return (error);
1441 zilog = zfsvfs->z_log;
1442
1443 if (dzp->z_pflags & ZFS_XATTR) {
1444 zfs_exit(zfsvfs, FTAG);
1445 return (SET_ERROR(EINVAL));
1446 }
1447
1448 if (zfsvfs->z_utf8 && u8_validate(dirname,
1449 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1450 zfs_exit(zfsvfs, FTAG);
1451 return (SET_ERROR(EILSEQ));
1452 }
1453
1454 if (vap->va_mask & AT_XVATTR) {
1455 if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
1456 crgetuid(cr), cr, vap->va_type)) != 0) {
1457 zfs_exit(zfsvfs, FTAG);
1458 return (error);
1459 }
1460 }
1461
1462 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1463 NULL, &acl_ids, NULL)) != 0) {
1464 zfs_exit(zfsvfs, FTAG);
1465 return (error);
1466 }
1467
1468 /*
1469 * First make sure the new directory doesn't exist.
1470 *
1471 * Existence is checked first to make sure we don't return
1472 * EACCES instead of EEXIST which can cause some applications
1473 * to fail.
1474 */
1475 *zpp = NULL;
1476
1477 if ((error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW))) {
1478 zfs_acl_ids_free(&acl_ids);
1479 zfs_exit(zfsvfs, FTAG);
1480 return (error);
1481 }
1482 ASSERT3P(zp, ==, NULL);
1483
1484 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
1485 mnt_ns))) {
1486 zfs_acl_ids_free(&acl_ids);
1487 zfs_exit(zfsvfs, FTAG);
1488 return (error);
1489 }
1490
1491 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
1492 zfs_acl_ids_free(&acl_ids);
1493 zfs_exit(zfsvfs, FTAG);
1494 return (SET_ERROR(EDQUOT));
1495 }
1496
1497 /*
1498 * Add a new entry to the directory.
1499 */
1500 getnewvnode_reserve_();
1501 tx = dmu_tx_create(zfsvfs->z_os);
1502 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1503 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1504 fuid_dirtied = zfsvfs->z_fuid_dirty;
1505 if (fuid_dirtied)
1506 zfs_fuid_txhold(zfsvfs, tx);
1507 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1508 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1509 acl_ids.z_aclp->z_acl_bytes);
1510 }
1511
1512 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1513 ZFS_SA_BASE_ATTR_SIZE);
1514
1515 error = dmu_tx_assign(tx, TXG_WAIT);
1516 if (error) {
1517 zfs_acl_ids_free(&acl_ids);
1518 dmu_tx_abort(tx);
1519 getnewvnode_drop_reserve();
1520 zfs_exit(zfsvfs, FTAG);
1521 return (error);
1522 }
1523
1524 /*
1525 * Create new node.
1526 */
1527 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1528
1529 if (fuid_dirtied)
1530 zfs_fuid_sync(zfsvfs, tx);
1531
1532 /*
1533 * Now put new name in parent dir.
1534 */
1535 (void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
1536
1537 *zpp = zp;
1538
1539 txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
1540 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
1541 acl_ids.z_fuidp, vap);
1542
1543 zfs_acl_ids_free(&acl_ids);
1544
1545 dmu_tx_commit(tx);
1546
1547 getnewvnode_drop_reserve();
1548
1549 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1550 zil_commit(zilog, 0);
1551
1552 zfs_exit(zfsvfs, FTAG);
1553 return (0);
1554 }
1555
1556 #if __FreeBSD_version < 1300124
1557 static void
1558 cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
1559 {
1560
1561 cache_purge(dvp);
1562 cache_purge(vp);
1563 }
1564 #endif
1565
1566 /*
1567 * Remove a directory subdir entry. If the current working
1568 * directory is the same as the subdir to be removed, the
1569 * remove will fail.
1570 *
1571 * IN: dvp - vnode of directory to remove from.
1572 * name - name of directory to be removed.
1573 * cwd - vnode of current working directory.
1574 * cr - credentials of caller.
1575 * ct - caller context
1576 * flags - case flags
1577 *
1578 * RETURN: 0 on success, error code on failure.
1579 *
1580 * Timestamps:
1581 * dvp - ctime|mtime updated
1582 */
1583 static int
1584 zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
1585 {
1586 znode_t *dzp = VTOZ(dvp);
1587 znode_t *zp = VTOZ(vp);
1588 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1589 zilog_t *zilog;
1590 dmu_tx_t *tx;
1591 int error;
1592
1593 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
1594 return (error);
1595 if ((error = zfs_verify_zp(zp)) != 0) {
1596 zfs_exit(zfsvfs, FTAG);
1597 return (error);
1598 }
1599 zilog = zfsvfs->z_log;
1600
1601
1602 if ((error = zfs_zaccess_delete(dzp, zp, cr, NULL))) {
1603 goto out;
1604 }
1605
1606 if (vp->v_type != VDIR) {
1607 error = SET_ERROR(ENOTDIR);
1608 goto out;
1609 }
1610
1611 vnevent_rmdir(vp, dvp, name, ct);
1612
1613 tx = dmu_tx_create(zfsvfs->z_os);
1614 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1615 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1616 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1617 zfs_sa_upgrade_txholds(tx, zp);
1618 zfs_sa_upgrade_txholds(tx, dzp);
1619 dmu_tx_mark_netfree(tx);
1620 error = dmu_tx_assign(tx, TXG_WAIT);
1621 if (error) {
1622 dmu_tx_abort(tx);
1623 zfs_exit(zfsvfs, FTAG);
1624 return (error);
1625 }
1626
1627 error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
1628
1629 if (error == 0) {
1630 uint64_t txtype = TX_RMDIR;
1631 zfs_log_remove(zilog, tx, txtype, dzp, name,
1632 ZFS_NO_OBJECT, B_FALSE);
1633 }
1634
1635 dmu_tx_commit(tx);
1636
1637 if (zfsvfs->z_use_namecache)
1638 cache_vop_rmdir(dvp, vp);
1639 out:
1640 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1641 zil_commit(zilog, 0);
1642
1643 zfs_exit(zfsvfs, FTAG);
1644 return (error);
1645 }
1646
1647 int
1648 zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
1649 {
1650 struct componentname cn;
1651 vnode_t *vp;
1652 int error;
1653
1654 if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
1655 return (error);
1656
1657 error = zfs_rmdir_(ZTOV(dzp), vp, name, cr);
1658 vput(vp);
1659 return (error);
1660 }
1661
1662 /*
1663 * Read as many directory entries as will fit into the provided
1664 * buffer from the given directory cursor position (specified in
1665 * the uio structure).
1666 *
1667 * IN: vp - vnode of directory to read.
1668 * uio - structure supplying read location, range info,
1669 * and return buffer.
1670 * cr - credentials of caller.
1671 * ct - caller context
1672 *
1673 * OUT: uio - updated offset and range, buffer filled.
1674 * eofp - set to true if end-of-file detected.
1675 * ncookies- number of entries in cookies
1676 * cookies - offsets to directory entries
1677 *
1678 * RETURN: 0 on success, error code on failure.
1679 *
1680 * Timestamps:
1681 * vp - atime updated
1682 *
1683  * Note that the low 4 bits of the cookie returned by zap are always zero.
1684 * This allows us to use the low range for "special" directory entries:
1685 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
1686 * we use the offset 2 for the '.zfs' directory.
1687 */
1688 static int
1689 zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
1690 int *ncookies, cookie_t **cookies)
1691 {
1692 znode_t *zp = VTOZ(vp);
1693 iovec_t *iovp;
1694 dirent64_t *odp;
1695 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1696 objset_t *os;
1697 caddr_t outbuf;
1698 size_t bufsize;
1699 zap_cursor_t zc;
1700 zap_attribute_t zap;
1701 uint_t bytes_wanted;
1702 uint64_t offset; /* must be unsigned; checks for < 1 */
1703 uint64_t parent;
1704 int local_eof;
1705 int outcount;
1706 int error;
1707 uint8_t prefetch;
1708 uint8_t type;
1709 int ncooks;
1710 cookie_t *cooks = NULL;
1711
1712 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
1713 return (error);
1714
1715 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
1716 &parent, sizeof (parent))) != 0) {
1717 zfs_exit(zfsvfs, FTAG);
1718 return (error);
1719 }
1720
1721 /*
1722 * If we are not given an eof variable,
1723 * use a local one.
1724 */
1725 if (eofp == NULL)
1726 eofp = &local_eof;
1727
1728 /*
1729 * Check for valid iov_len.
1730 */
1731 if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
1732 zfs_exit(zfsvfs, FTAG);
1733 return (SET_ERROR(EINVAL));
1734 }
1735
1736 /*
1737 * Quit if directory has been removed (posix)
1738 */
1739 if ((*eofp = zp->z_unlinked) != 0) {
1740 zfs_exit(zfsvfs, FTAG);
1741 return (0);
1742 }
1743
1744 error = 0;
1745 os = zfsvfs->z_os;
1746 offset = zfs_uio_offset(uio);
1747 prefetch = zp->z_zn_prefetch;
1748
1749 /*
1750 * Initialize the iterator cursor.
1751 */
1752 if (offset <= 3) {
1753 /*
1754 * Start iteration from the beginning of the directory.
1755 */
1756 zap_cursor_init(&zc, os, zp->z_id);
1757 } else {
1758 /*
1759 * The offset is a serialized cursor.
1760 */
1761 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
1762 }
1763
1764 /*
1765 * Get space to change directory entries into fs independent format.
1766 */
1767 iovp = GET_UIO_STRUCT(uio)->uio_iov;
1768 bytes_wanted = iovp->iov_len;
1769 if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
1770 bufsize = bytes_wanted;
1771 outbuf = kmem_alloc(bufsize, KM_SLEEP);
1772 odp = (struct dirent64 *)outbuf;
1773 } else {
1774 bufsize = bytes_wanted;
1775 outbuf = NULL;
1776 odp = (struct dirent64 *)iovp->iov_base;
1777 }
1778
1779 if (ncookies != NULL) {
1780 /*
1781  * Minimum entry size is the dirent size plus 1 byte for a file name.
1782 */
1783 ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
1784 sizeof (((struct dirent *)NULL)->d_name) + 1);
1785 cooks = malloc(ncooks * sizeof (*cooks), M_TEMP, M_WAITOK);
1786 *cookies = cooks;
1787 *ncookies = ncooks;
1788 }
1789
1790 /*
1791 * Transform to file-system independent format
1792 */
1793 outcount = 0;
1794 while (outcount < bytes_wanted) {
1795 ino64_t objnum;
1796 ushort_t reclen;
1797 off64_t *next = NULL;
1798
1799 /*
1800 * Special case `.', `..', and `.zfs'.
1801 */
1802 if (offset == 0) {
1803 (void) strcpy(zap.za_name, ".");
1804 zap.za_normalization_conflict = 0;
1805 objnum = zp->z_id;
1806 type = DT_DIR;
1807 } else if (offset == 1) {
1808 (void) strcpy(zap.za_name, "..");
1809 zap.za_normalization_conflict = 0;
1810 objnum = parent;
1811 type = DT_DIR;
1812 } else if (offset == 2 && zfs_show_ctldir(zp)) {
1813 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
1814 zap.za_normalization_conflict = 0;
1815 objnum = ZFSCTL_INO_ROOT;
1816 type = DT_DIR;
1817 } else {
1818 /*
1819 * Grab next entry.
1820 */
1821 if ((error = zap_cursor_retrieve(&zc, &zap))) {
1822 if ((*eofp = (error == ENOENT)) != 0)
1823 break;
1824 else
1825 goto update;
1826 }
1827
1828 if (zap.za_integer_length != 8 ||
1829 zap.za_num_integers != 1) {
1830 cmn_err(CE_WARN, "zap_readdir: bad directory "
1831 "entry, obj = %lld, offset = %lld\n",
1832 (u_longlong_t)zp->z_id,
1833 (u_longlong_t)offset);
1834 error = SET_ERROR(ENXIO);
1835 goto update;
1836 }
1837
1838 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
1839 /*
1840 * MacOS X can extract the object type here such as:
1841 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
1842 */
1843 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
1844 }
1845
1846 reclen = DIRENT64_RECLEN(strlen(zap.za_name));
1847
1848 /*
1849 * Will this entry fit in the buffer?
1850 */
1851 if (outcount + reclen > bufsize) {
1852 /*
1853 * Did we manage to fit anything in the buffer?
1854 */
1855 if (!outcount) {
1856 error = SET_ERROR(EINVAL);
1857 goto update;
1858 }
1859 break;
1860 }
1861 /*
1862 * Add normal entry:
1863 */
1864 odp->d_ino = objnum;
1865 odp->d_reclen = reclen;
1866 odp->d_namlen = strlen(zap.za_name);
1867 /* NOTE: d_off is the offset for the *next* entry. */
1868 next = &odp->d_off;
1869 strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
1870 odp->d_type = type;
1871 dirent_terminate(odp);
1872 odp = (dirent64_t *)((intptr_t)odp + reclen);
1873
1874 outcount += reclen;
1875
1876 ASSERT3S(outcount, <=, bufsize);
1877
1878 if (prefetch)
1879 dmu_prefetch_dnode(os, objnum, ZIO_PRIORITY_SYNC_READ);
1880
1881 /*
1882 * Move to the next entry, fill in the previous offset.
1883 */
1884 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
1885 zap_cursor_advance(&zc);
1886 offset = zap_cursor_serialize(&zc);
1887 } else {
1888 offset += 1;
1889 }
1890
1891 /* Fill the offset right after advancing the cursor. */
1892 if (next != NULL)
1893 *next = offset;
1894 if (cooks != NULL) {
1895 *cooks++ = offset;
1896 ncooks--;
1897 KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
1898 }
1899 }
1900 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
1901
1902 /* Subtract unused cookies */
1903 if (ncookies != NULL)
1904 *ncookies -= ncooks;
1905
1906 if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
1907 iovp->iov_base += outcount;
1908 iovp->iov_len -= outcount;
1909 zfs_uio_resid(uio) -= outcount;
1910 } else if ((error =
1911 zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
1912 /*
1913 * Reset the pointer.
1914 */
1915 offset = zfs_uio_offset(uio);
1916 }
1917
1918 update:
1919 zap_cursor_fini(&zc);
1920 if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
1921 kmem_free(outbuf, bufsize);
1922
1923 if (error == ENOENT)
1924 error = 0;
1925
1926 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
1927
1928 zfs_uio_setoffset(uio, offset);
1929 zfs_exit(zfsvfs, FTAG);
1930 if (error != 0 && cookies != NULL) {
1931 free(*cookies, M_TEMP);
1932 *cookies = NULL;
1933 *ncookies = 0;
1934 }
1935 return (error);
1936 }
1937
1938 /*
1939 * Get the requested file attributes and place them in the provided
1940 * vattr structure.
1941 *
1942 * IN: vp - vnode of file.
1943 * vap - va_mask identifies requested attributes.
1944 * If AT_XVATTR set, then optional attrs are requested
1945 * flags - ATTR_NOACLCHECK (CIFS server context)
1946 * cr - credentials of caller.
1947 *
1948 * OUT: vap - attribute values.
1949 *
1950 * RETURN: 0 (always succeeds).
1951 */
1952 static int
1953 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
1954 {
1955 znode_t *zp = VTOZ(vp);
1956 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1957 int error = 0;
1958 uint32_t blksize;
1959 u_longlong_t nblocks;
1960 uint64_t mtime[2], ctime[2], crtime[2], rdev;
1961 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
1962 xoptattr_t *xoap = NULL;
1963 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
1964 sa_bulk_attr_t bulk[4];
1965 int count = 0;
1966
1967 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
1968 return (error);
1969
1970 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
1971
1972 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1973 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1974 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
1975 if (vp->v_type == VBLK || vp->v_type == VCHR)
1976 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1977 &rdev, 8);
1978
1979 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
1980 zfs_exit(zfsvfs, FTAG);
1981 return (error);
1982 }
1983
1984 /*
1985  * If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES.
1986  * Also, if we are the owner, don't bother, since the owner should
1987  * always be allowed to read basic attributes of the file.
1988 */
1989 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
1990 (vap->va_uid != crgetuid(cr))) {
1991 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
1992 skipaclchk, cr, NULL))) {
1993 zfs_exit(zfsvfs, FTAG);
1994 return (error);
1995 }
1996 }
1997
1998 /*
1999 * Return all attributes. It's cheaper to provide the answer
2000 * than to determine whether we were asked the question.
2001 */
2002
2003 vap->va_type = IFTOVT(zp->z_mode);
2004 vap->va_mode = zp->z_mode & ~S_IFMT;
2005 vn_fsid(vp, vap);
2006 vap->va_nodeid = zp->z_id;
2007 vap->va_nlink = zp->z_links;
2008 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
2009 zp->z_links < ZFS_LINK_MAX)
2010 vap->va_nlink++;
2011 vap->va_size = zp->z_size;
2012 if (vp->v_type == VBLK || vp->v_type == VCHR)
2013 vap->va_rdev = zfs_cmpldev(rdev);
2014 vap->va_gen = zp->z_gen;
2015 vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
2016 vap->va_filerev = zp->z_seq;
2017
2018 /*
2019 * Add in any requested optional attributes and the create time.
2020 * Also set the corresponding bits in the returned attribute bitmap.
2021 */
2022 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2023 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2024 xoap->xoa_archive =
2025 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2026 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2027 }
2028
2029 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2030 xoap->xoa_readonly =
2031 ((zp->z_pflags & ZFS_READONLY) != 0);
2032 XVA_SET_RTN(xvap, XAT_READONLY);
2033 }
2034
2035 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2036 xoap->xoa_system =
2037 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2038 XVA_SET_RTN(xvap, XAT_SYSTEM);
2039 }
2040
2041 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2042 xoap->xoa_hidden =
2043 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2044 XVA_SET_RTN(xvap, XAT_HIDDEN);
2045 }
2046
2047 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2048 xoap->xoa_nounlink =
2049 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2050 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2051 }
2052
2053 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2054 xoap->xoa_immutable =
2055 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2056 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2057 }
2058
2059 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2060 xoap->xoa_appendonly =
2061 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2062 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2063 }
2064
2065 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2066 xoap->xoa_nodump =
2067 ((zp->z_pflags & ZFS_NODUMP) != 0);
2068 XVA_SET_RTN(xvap, XAT_NODUMP);
2069 }
2070
2071 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2072 xoap->xoa_opaque =
2073 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2074 XVA_SET_RTN(xvap, XAT_OPAQUE);
2075 }
2076
2077 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2078 xoap->xoa_av_quarantined =
2079 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2080 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2081 }
2082
2083 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2084 xoap->xoa_av_modified =
2085 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2086 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2087 }
2088
2089 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2090 vp->v_type == VREG) {
2091 zfs_sa_get_scanstamp(zp, xvap);
2092 }
2093
2094 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2095 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2096 XVA_SET_RTN(xvap, XAT_REPARSE);
2097 }
2098 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2099 xoap->xoa_generation = zp->z_gen;
2100 XVA_SET_RTN(xvap, XAT_GEN);
2101 }
2102
2103 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2104 xoap->xoa_offline =
2105 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2106 XVA_SET_RTN(xvap, XAT_OFFLINE);
2107 }
2108
2109 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2110 xoap->xoa_sparse =
2111 ((zp->z_pflags & ZFS_SPARSE) != 0);
2112 XVA_SET_RTN(xvap, XAT_SPARSE);
2113 }
2114
2115 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2116 xoap->xoa_projinherit =
2117 ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
2118 XVA_SET_RTN(xvap, XAT_PROJINHERIT);
2119 }
2120
2121 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2122 xoap->xoa_projid = zp->z_projid;
2123 XVA_SET_RTN(xvap, XAT_PROJID);
2124 }
2125 }
2126
2127 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2128 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2129 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2130 ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
2131
2132
2133 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2134 vap->va_blksize = blksize;
2135 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
2136
2137 if (zp->z_blksz == 0) {
2138 /*
2139 * Block size hasn't been set; suggest maximal I/O transfers.
2140 */
2141 vap->va_blksize = zfsvfs->z_max_blksz;
2142 }
2143
2144 zfs_exit(zfsvfs, FTAG);
2145 return (0);
2146 }
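/*
 * Illustrative sketch (not part of this file's VOP glue): a FreeBSD
 * VOP_GETATTR handler would typically call into zfs_getattr() roughly as
 * follows.  The wrapper name below is hypothetical; only the zfs_getattr()
 * call itself reflects the interface documented above.
 *
 *	static int
 *	example_freebsd_getattr(struct vop_getattr_args *ap)
 *	{
 *		return (zfs_getattr(ap->a_vp, ap->a_vap, 0, ap->a_cred));
 *	}
 */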
2147
2148 /*
2149 * Set the file attributes to the values contained in the
2150 * vattr structure.
2151 *
2152 * IN: zp - znode of file to be modified.
2153 * vap - new attribute values.
2154 * If AT_XVATTR set, then optional attrs are being set
2155 * flags - ATTR_UTIME set if non-default time values provided.
2156 * - ATTR_NOACLCHECK (CIFS context only).
2157 * cr - credentials of caller.
2158 * mnt_ns - Unused on FreeBSD
2159 *
2160 * RETURN: 0 on success, error code on failure.
2161 *
2162 * Timestamps:
2163 * vp - ctime updated, mtime updated if size changed.
2164 */
2165 int
2166 zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
2167 {
2168 vnode_t *vp = ZTOV(zp);
2169 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2170 objset_t *os;
2171 zilog_t *zilog;
2172 dmu_tx_t *tx;
2173 vattr_t oldva;
2174 xvattr_t tmpxvattr;
2175 uint_t mask = vap->va_mask;
2176 uint_t saved_mask = 0;
2177 uint64_t saved_mode;
2178 int trim_mask = 0;
2179 uint64_t new_mode;
2180 uint64_t new_uid, new_gid;
2181 uint64_t xattr_obj;
2182 uint64_t mtime[2], ctime[2];
2183 uint64_t projid = ZFS_INVALID_PROJID;
2184 znode_t *attrzp;
2185 int need_policy = FALSE;
2186 int err, err2;
2187 zfs_fuid_info_t *fuidp = NULL;
2188 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2189 xoptattr_t *xoap;
2190 zfs_acl_t *aclp;
2191 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2192 boolean_t fuid_dirtied = B_FALSE;
2193 sa_bulk_attr_t bulk[7], xattr_bulk[7];
2194 int count = 0, xattr_count = 0;
2195
2196 if (mask == 0)
2197 return (0);
2198
2199 if (mask & AT_NOSET)
2200 return (SET_ERROR(EINVAL));
2201
2202 if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
2203 return (err);
2204
2205 os = zfsvfs->z_os;
2206 zilog = zfsvfs->z_log;
2207
2208 /*
2209 * Make sure that if we have an ephemeral uid/gid or xvattr specified,
2210 * the file system is at the proper version level.
2211 */
2212
2213 if (zfsvfs->z_use_fuids == B_FALSE &&
2214 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2215 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2216 (mask & AT_XVATTR))) {
2217 zfs_exit(zfsvfs, FTAG);
2218 return (SET_ERROR(EINVAL));
2219 }
2220
2221 if (mask & AT_SIZE && vp->v_type == VDIR) {
2222 zfs_exit(zfsvfs, FTAG);
2223 return (SET_ERROR(EISDIR));
2224 }
2225
2226 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2227 zfs_exit(zfsvfs, FTAG);
2228 return (SET_ERROR(EINVAL));
2229 }
2230
2231 /*
2232 * If this is an xvattr_t, then get a pointer to the structure of
2233 * optional attributes. If this is NULL, then we have a vattr_t.
2234 */
2235 xoap = xva_getxoptattr(xvap);
2236
2237 xva_init(&tmpxvattr);
2238
2239 /*
2240 * Immutable files can only alter immutable bit and atime
2241 */
2242 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2243 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2244 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2245 zfs_exit(zfsvfs, FTAG);
2246 return (SET_ERROR(EPERM));
2247 }
2248
2249 /*
2250 * Note: ZFS_READONLY is handled in zfs_zaccess_common.
2251 */
2252
2253 /*
2254 * Verify that the timestamps don't overflow 32 bits.
2255 * ZFS can handle large timestamps, but 32-bit syscalls can't
2256 * handle times greater than 2039. This check should be removed
2257 * once large timestamps are fully supported.
2258 */
2259 if (mask & (AT_ATIME | AT_MTIME)) {
2260 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2261 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2262 zfs_exit(zfsvfs, FTAG);
2263 return (SET_ERROR(EOVERFLOW));
2264 }
2265 }
2266 if (xoap != NULL && (mask & AT_XVATTR)) {
2267 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
2268 TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
2269 zfs_exit(zfsvfs, FTAG);
2270 return (SET_ERROR(EOVERFLOW));
2271 }
2272
2273 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2274 if (!dmu_objset_projectquota_enabled(os) ||
2275 (!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode))) {
2276 zfs_exit(zfsvfs, FTAG);
2277 return (SET_ERROR(EOPNOTSUPP));
2278 }
2279
2280 projid = xoap->xoa_projid;
2281 if (unlikely(projid == ZFS_INVALID_PROJID)) {
2282 zfs_exit(zfsvfs, FTAG);
2283 return (SET_ERROR(EINVAL));
2284 }
2285
2286 if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
2287 projid = ZFS_INVALID_PROJID;
2288 else
2289 need_policy = TRUE;
2290 }
2291
2292 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
2293 (xoap->xoa_projinherit !=
2294 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
2295 (!dmu_objset_projectquota_enabled(os) ||
2296 (!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode)))) {
2297 zfs_exit(zfsvfs, FTAG);
2298 return (SET_ERROR(EOPNOTSUPP));
2299 }
2300 }
2301
2302 attrzp = NULL;
2303 aclp = NULL;
2304
2305 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2306 zfs_exit(zfsvfs, FTAG);
2307 return (SET_ERROR(EROFS));
2308 }
2309
2310 /*
2311 * First validate permissions
2312 */
2313
2314 if (mask & AT_SIZE) {
2315 /*
2316 * XXX - Note, we are not providing any open
2317 * mode flags here (like FNDELAY), so we may
2318 * block if there are locks present... this
2319 * should be addressed in openat().
2320 */
2321 /* XXX - would it be OK to generate a log record here? */
2322 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2323 if (err) {
2324 zfs_exit(zfsvfs, FTAG);
2325 return (err);
2326 }
2327 }
2328
2329 if (mask & (AT_ATIME|AT_MTIME) ||
2330 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2331 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2332 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2333 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2334 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2335 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2336 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2337 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2338 skipaclchk, cr, mnt_ns);
2339 }
2340
2341 if (mask & (AT_UID|AT_GID)) {
2342 int idmask = (mask & (AT_UID|AT_GID));
2343 int take_owner;
2344 int take_group;
2345
2346 /*
2347 * NOTE: even if a new mode is being set,
2348 * we may clear S_ISUID/S_ISGID bits.
2349 */
2350
2351 if (!(mask & AT_MODE))
2352 vap->va_mode = zp->z_mode;
2353
2354 /*
2355 * Take ownership or chgrp to a group we are a member of.
2356 */
2357
2358 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2359 take_group = (mask & AT_GID) &&
2360 zfs_groupmember(zfsvfs, vap->va_gid, cr);
2361
2362 /*
2363 * If both AT_UID and AT_GID are set then take_owner and
2364 * take_group must both be set in order to allow taking
2365 * ownership.
2366 *
2367 * Otherwise, send the check through secpolicy_vnode_setattr()
2368 *
2369 */
2370
2371 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2372 ((idmask == AT_UID) && take_owner) ||
2373 ((idmask == AT_GID) && take_group)) {
2374 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2375 skipaclchk, cr, mnt_ns) == 0) {
2376 /*
2377 * Remove setuid/setgid for non-privileged users
2378 */
2379 secpolicy_setid_clear(vap, vp, cr);
2380 trim_mask = (mask & (AT_UID|AT_GID));
2381 } else {
2382 need_policy = TRUE;
2383 }
2384 } else {
2385 need_policy = TRUE;
2386 }
2387 }
2388
2389 oldva.va_mode = zp->z_mode;
2390 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2391 if (mask & AT_XVATTR) {
2392 /*
2393 * Update xvattr mask to include only those attributes
2394 * that are actually changing.
2395 *
2396 * The bits will be restored prior to actually setting
2397 * the attributes, so the caller thinks they were set.
2398 */
2399 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2400 if (xoap->xoa_appendonly !=
2401 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2402 need_policy = TRUE;
2403 } else {
2404 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2405 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2406 }
2407 }
2408
2409 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2410 if (xoap->xoa_projinherit !=
2411 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
2412 need_policy = TRUE;
2413 } else {
2414 XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
2415 XVA_SET_REQ(&tmpxvattr, XAT_PROJINHERIT);
2416 }
2417 }
2418
2419 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2420 if (xoap->xoa_nounlink !=
2421 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2422 need_policy = TRUE;
2423 } else {
2424 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2425 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2426 }
2427 }
2428
2429 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2430 if (xoap->xoa_immutable !=
2431 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2432 need_policy = TRUE;
2433 } else {
2434 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2435 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2436 }
2437 }
2438
2439 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2440 if (xoap->xoa_nodump !=
2441 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2442 need_policy = TRUE;
2443 } else {
2444 XVA_CLR_REQ(xvap, XAT_NODUMP);
2445 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2446 }
2447 }
2448
2449 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2450 if (xoap->xoa_av_modified !=
2451 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2452 need_policy = TRUE;
2453 } else {
2454 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2455 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2456 }
2457 }
2458
2459 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2460 if ((vp->v_type != VREG &&
2461 xoap->xoa_av_quarantined) ||
2462 xoap->xoa_av_quarantined !=
2463 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2464 need_policy = TRUE;
2465 } else {
2466 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2467 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2468 }
2469 }
2470
2471 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2472 zfs_exit(zfsvfs, FTAG);
2473 return (SET_ERROR(EPERM));
2474 }
2475
2476 if (need_policy == FALSE &&
2477 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2478 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2479 need_policy = TRUE;
2480 }
2481 }
2482
2483 if (mask & AT_MODE) {
2484 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
2485 mnt_ns) == 0) {
2486 err = secpolicy_setid_setsticky_clear(vp, vap,
2487 &oldva, cr);
2488 if (err) {
2489 zfs_exit(zfsvfs, FTAG);
2490 return (err);
2491 }
2492 trim_mask |= AT_MODE;
2493 } else {
2494 need_policy = TRUE;
2495 }
2496 }
2497
2498 if (need_policy) {
2499 /*
2500 * If trim_mask is set, then taking ownership has been
2501 * granted or write_acl is present and the user has the
2502 * ability to modify the mode. In that case remove
2503 * UID|GID and/or MODE from the mask so that
2504 * secpolicy_vnode_setattr() doesn't revoke it.
2505 */
2506
2507 if (trim_mask) {
2508 saved_mask = vap->va_mask;
2509 vap->va_mask &= ~trim_mask;
2510 if (trim_mask & AT_MODE) {
2511 /*
2512 * Save the mode, as secpolicy_vnode_setattr()
2513 * will overwrite it with ova.va_mode.
2514 */
2515 saved_mode = vap->va_mode;
2516 }
2517 }
2518 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2519 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2520 if (err) {
2521 zfs_exit(zfsvfs, FTAG);
2522 return (err);
2523 }
2524
2525 if (trim_mask) {
2526 vap->va_mask |= saved_mask;
2527 if (trim_mask & AT_MODE) {
2528 /*
2529 * Recover the mode after
2530 * secpolicy_vnode_setattr().
2531 */
2532 vap->va_mode = saved_mode;
2533 }
2534 }
2535 }
2536
2537 /*
2538 * secpolicy_vnode_setattr() or taking ownership may have
2539 * changed va_mask.
2540 */
2541 mask = vap->va_mask;
2542
2543 if ((mask & (AT_UID | AT_GID)) || projid != ZFS_INVALID_PROJID) {
2544 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2545 &xattr_obj, sizeof (xattr_obj));
2546
2547 if (err == 0 && xattr_obj) {
2548 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
2549 if (err == 0) {
2550 err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
2551 if (err != 0)
2552 vrele(ZTOV(attrzp));
2553 }
2554 if (err)
2555 goto out2;
2556 }
2557 if (mask & AT_UID) {
2558 new_uid = zfs_fuid_create(zfsvfs,
2559 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2560 if (new_uid != zp->z_uid &&
2561 zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
2562 new_uid)) {
2563 if (attrzp)
2564 vput(ZTOV(attrzp));
2565 err = SET_ERROR(EDQUOT);
2566 goto out2;
2567 }
2568 }
2569
2570 if (mask & AT_GID) {
2571 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
2572 cr, ZFS_GROUP, &fuidp);
2573 if (new_gid != zp->z_gid &&
2574 zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
2575 new_gid)) {
2576 if (attrzp)
2577 vput(ZTOV(attrzp));
2578 err = SET_ERROR(EDQUOT);
2579 goto out2;
2580 }
2581 }
2582
2583 if (projid != ZFS_INVALID_PROJID &&
2584 zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
2585 if (attrzp)
2586 vput(ZTOV(attrzp));
2587 err = SET_ERROR(EDQUOT);
2588 goto out2;
2589 }
2590 }
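/*
 * Build the transaction.  The holds added below depend on which
 * attributes are changing: mode/ACL updates, SA layout upgrades, the
 * extended attribute parent, and dirty FUID tables all add holds.
 */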
2591 tx = dmu_tx_create(os);
2592
2593 if (mask & AT_MODE) {
2594 uint64_t pmode = zp->z_mode;
2595 uint64_t acl_obj;
2596 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2597
2598 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
2599 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
2600 err = SET_ERROR(EPERM);
2601 goto out;
2602 }
2603
2604 if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
2605 goto out;
2606
2607 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2608 /*
2609 * Are we upgrading ACL from old V0 format
2610 * to V1 format?
2611 */
2612 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
2613 zfs_znode_acl_version(zp) ==
2614 ZFS_ACL_VERSION_INITIAL) {
2615 dmu_tx_hold_free(tx, acl_obj, 0,
2616 DMU_OBJECT_END);
2617 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2618 0, aclp->z_acl_bytes);
2619 } else {
2620 dmu_tx_hold_write(tx, acl_obj, 0,
2621 aclp->z_acl_bytes);
2622 }
2623 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2624 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2625 0, aclp->z_acl_bytes);
2626 }
2627 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2628 } else {
2629 if (((mask & AT_XVATTR) &&
2630 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
2631 (projid != ZFS_INVALID_PROJID &&
2632 !(zp->z_pflags & ZFS_PROJID)))
2633 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2634 else
2635 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2636 }
2637
2638 if (attrzp) {
2639 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2640 }
2641
2642 fuid_dirtied = zfsvfs->z_fuid_dirty;
2643 if (fuid_dirtied)
2644 zfs_fuid_txhold(zfsvfs, tx);
2645
2646 zfs_sa_upgrade_txholds(tx, zp);
2647
2648 err = dmu_tx_assign(tx, TXG_WAIT);
2649 if (err)
2650 goto out;
2651
2652 count = 0;
2653 /*
2654 * Set each attribute requested.
2655 * We group settings according to the locks they need to acquire.
2656 *
2657 * Note: you cannot set ctime directly, although it will be
2658 * updated as a side-effect of calling this function.
2659 */
2660
2661 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
2662 /*
2663 * For an existing object that was upgraded from an old system,
2664 * the on-disk layout has no slot for the project ID attribute.
2665 * But the quota accounting logic needs to access the related
2666 * slots by offset directly, so we need to adjust the old
2667 * object's layout to put the project ID at a unified, fixed offset.
2668 */
2669 if (attrzp)
2670 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
2671 if (err == 0)
2672 err = sa_add_projid(zp->z_sa_hdl, tx, projid);
2673
2674 if (unlikely(err == EEXIST))
2675 err = 0;
2676 else if (err != 0)
2677 goto out;
2678 else
2679 projid = ZFS_INVALID_PROJID;
2680 }
2681
2682 if (mask & (AT_UID|AT_GID|AT_MODE))
2683 mutex_enter(&zp->z_acl_lock);
2684
2685 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
2686 &zp->z_pflags, sizeof (zp->z_pflags));
2687
2688 if (attrzp) {
2689 if (mask & (AT_UID|AT_GID|AT_MODE))
2690 mutex_enter(&attrzp->z_acl_lock);
2691 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2692 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
2693 sizeof (attrzp->z_pflags));
2694 if (projid != ZFS_INVALID_PROJID) {
2695 attrzp->z_projid = projid;
2696 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2697 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
2698 sizeof (attrzp->z_projid));
2699 }
2700 }
2701
2702 if (mask & (AT_UID|AT_GID)) {
2703
2704 if (mask & AT_UID) {
2705 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2706 &new_uid, sizeof (new_uid));
2707 zp->z_uid = new_uid;
2708 if (attrzp) {
2709 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2710 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
2711 sizeof (new_uid));
2712 attrzp->z_uid = new_uid;
2713 }
2714 }
2715
2716 if (mask & AT_GID) {
2717 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
2718 NULL, &new_gid, sizeof (new_gid));
2719 zp->z_gid = new_gid;
2720 if (attrzp) {
2721 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2722 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
2723 sizeof (new_gid));
2724 attrzp->z_gid = new_gid;
2725 }
2726 }
2727 if (!(mask & AT_MODE)) {
2728 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
2729 NULL, &new_mode, sizeof (new_mode));
2730 new_mode = zp->z_mode;
2731 }
2732 err = zfs_acl_chown_setattr(zp);
2733 ASSERT0(err);
2734 if (attrzp) {
2735 vn_seqc_write_begin(ZTOV(attrzp));
2736 err = zfs_acl_chown_setattr(attrzp);
2737 vn_seqc_write_end(ZTOV(attrzp));
2738 ASSERT0(err);
2739 }
2740 }
2741
2742 if (mask & AT_MODE) {
2743 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
2744 &new_mode, sizeof (new_mode));
2745 zp->z_mode = new_mode;
2746 ASSERT3P(aclp, !=, NULL);
2747 err = zfs_aclset_common(zp, aclp, cr, tx);
2748 ASSERT0(err);
2749 if (zp->z_acl_cached)
2750 zfs_acl_free(zp->z_acl_cached);
2751 zp->z_acl_cached = aclp;
2752 aclp = NULL;
2753 }
2754
2755
2756 if (mask & AT_ATIME) {
2757 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
2758 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
2759 &zp->z_atime, sizeof (zp->z_atime));
2760 }
2761
2762 if (mask & AT_MTIME) {
2763 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2764 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
2765 mtime, sizeof (mtime));
2766 }
2767
2768 if (projid != ZFS_INVALID_PROJID) {
2769 zp->z_projid = projid;
2770 SA_ADD_BULK_ATTR(bulk, count,
2771 SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
2772 sizeof (zp->z_projid));
2773 }
2774
2775 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
2776 if (mask & AT_SIZE && !(mask & AT_MTIME)) {
2777 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
2778 NULL, mtime, sizeof (mtime));
2779 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
2780 &ctime, sizeof (ctime));
2781 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
2782 } else if (mask != 0) {
2783 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
2784 &ctime, sizeof (ctime));
2785 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime);
2786 if (attrzp) {
2787 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2788 SA_ZPL_CTIME(zfsvfs), NULL,
2789 &ctime, sizeof (ctime));
2790 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
2791 mtime, ctime);
2792 }
2793 }
2794
2795 /*
2796 * Do this after setting the timestamps to prevent a timestamp
2797 * update from toggling the bit.
2798 */
2799
2800 if (xoap && (mask & AT_XVATTR)) {
2801
2802 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
2803 xoap->xoa_createtime = vap->va_birthtime;
2804 /*
2805 * Restore the trimmed-off masks
2806 * so that the return masks can be set for the caller.
2807 */
2808
2809 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
2810 XVA_SET_REQ(xvap, XAT_APPENDONLY);
2811 }
2812 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
2813 XVA_SET_REQ(xvap, XAT_NOUNLINK);
2814 }
2815 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
2816 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2817 }
2818 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
2819 XVA_SET_REQ(xvap, XAT_NODUMP);
2820 }
2821 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
2822 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2823 }
2824 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
2825 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2826 }
2827 if (XVA_ISSET_REQ(&tmpxvattr, XAT_PROJINHERIT)) {
2828 XVA_SET_REQ(xvap, XAT_PROJINHERIT);
2829 }
2830
2831 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2832 ASSERT3S(vp->v_type, ==, VREG);
2833
2834 zfs_xvattr_set(zp, xvap, tx);
2835 }
2836
2837 if (fuid_dirtied)
2838 zfs_fuid_sync(zfsvfs, tx);
2839
2840 if (mask != 0)
2841 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2842
2843 if (mask & (AT_UID|AT_GID|AT_MODE))
2844 mutex_exit(&zp->z_acl_lock);
2845
2846 if (attrzp) {
2847 if (mask & (AT_UID|AT_GID|AT_MODE))
2848 mutex_exit(&attrzp->z_acl_lock);
2849 }
2850 out:
2851 if (err == 0 && attrzp) {
2852 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2853 xattr_count, tx);
2854 ASSERT0(err2);
2855 }
2856
2857 if (attrzp)
2858 vput(ZTOV(attrzp));
2859
2860 if (aclp)
2861 zfs_acl_free(aclp);
2862
2863 if (fuidp) {
2864 zfs_fuid_info_free(fuidp);
2865 fuidp = NULL;
2866 }
2867
2868 if (err) {
2869 dmu_tx_abort(tx);
2870 } else {
2871 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2872 dmu_tx_commit(tx);
2873 }
2874
2875 out2:
2876 if (os->os_sync == ZFS_SYNC_ALWAYS)
2877 zil_commit(zilog, 0);
2878
2879 zfs_exit(zfsvfs, FTAG);
2880 return (err);
2881 }
2882
2883 /*
2884 * Look up the directory entries corresponding to the source and target
2885 * directory/name pairs.
2886 */
2887 static int
2888 zfs_rename_relock_lookup(znode_t *sdzp, const struct componentname *scnp,
2889 znode_t **szpp, znode_t *tdzp, const struct componentname *tcnp,
2890 znode_t **tzpp)
2891 {
2892 zfsvfs_t *zfsvfs;
2893 znode_t *szp, *tzp;
2894 int error;
2895
2896 /*
2897 * Before using sdzp and tdzp we must ensure that they are live.
2898 * As a porting legacy from illumos we have two things to worry
2899 * about. One is typical for FreeBSD: that the vnode is
2900 * not reclaimed (doomed). The other is that the znode is live.
2901 * The current code can invalidate the znode without acquiring the
2902 * corresponding vnode lock if the object represented by the znode
2903 * and vnode is no longer valid after a rollback or receive operation.
2904 * z_teardown_lock hidden behind zfs_enter and zfs_exit is the lock
2905 * that protects the znodes from the invalidation.
2906 */
2907 zfsvfs = sdzp->z_zfsvfs;
2908 ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
2909 if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
2910 return (error);
2911 if ((error = zfs_verify_zp(tdzp)) != 0) {
2912 zfs_exit(zfsvfs, FTAG);
2913 return (error);
2914 }
2915
2916 /*
2917 * Re-resolve svp to be certain it still exists and fetch the
2918 * correct vnode.
2919 */
2920 error = zfs_dirent_lookup(sdzp, scnp->cn_nameptr, &szp, ZEXISTS);
2921 if (error != 0) {
2922 /* Source entry invalid or not there. */
2923 if ((scnp->cn_flags & ISDOTDOT) != 0 ||
2924 (scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
2925 error = SET_ERROR(EINVAL);
2926 goto out;
2927 }
2928 *szpp = szp;
2929
2930 /*
2931 * Re-resolve tvp; if it disappeared, we just carry on.
2932 */
2933 error = zfs_dirent_lookup(tdzp, tcnp->cn_nameptr, &tzp, 0);
2934 if (error != 0) {
2935 vrele(ZTOV(szp));
2936 if ((tcnp->cn_flags & ISDOTDOT) != 0)
2937 error = SET_ERROR(EINVAL);
2938 goto out;
2939 }
2940 *tzpp = tzp;
2941 out:
2942 zfs_exit(zfsvfs, FTAG);
2943 return (error);
2944 }
2945
2946 /*
2947 * We acquire all but fdvp locks using non-blocking acquisitions. If we
2948 * fail to acquire any lock in the path we will drop all held locks,
2949 * acquire the new lock in a blocking fashion, and then release it and
2950 * restart the rename. This acquire/release step ensures that we do not
2951 * spin on a lock waiting for release. On error release all vnode locks
2952 * and decrement references the way tmpfs_rename() would do.
2953 */
2954 static int
2955 zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
2956 struct vnode *tdvp, struct vnode **tvpp,
2957 const struct componentname *scnp, const struct componentname *tcnp)
2958 {
2959 struct vnode *nvp, *svp, *tvp;
2960 znode_t *sdzp, *tdzp, *szp, *tzp;
2961 int error;
2962
2963 VOP_UNLOCK1(tdvp);
2964 if (*tvpp != NULL && *tvpp != tdvp)
2965 VOP_UNLOCK1(*tvpp);
2966
2967 relock:
2968 error = vn_lock(sdvp, LK_EXCLUSIVE);
2969 if (error)
2970 goto out;
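/*
 * Try tdvp without blocking.  On EBUSY, drop sdvp, acquire tdvp in a
 * blocking fashion to wait out the current holder, then release it and
 * restart so the locks are always taken in a consistent order.
 */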
2971 error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
2972 if (error != 0) {
2973 VOP_UNLOCK1(sdvp);
2974 if (error != EBUSY)
2975 goto out;
2976 error = vn_lock(tdvp, LK_EXCLUSIVE);
2977 if (error)
2978 goto out;
2979 VOP_UNLOCK1(tdvp);
2980 goto relock;
2981 }
2982 tdzp = VTOZ(tdvp);
2983 sdzp = VTOZ(sdvp);
2984
2985 error = zfs_rename_relock_lookup(sdzp, scnp, &szp, tdzp, tcnp, &tzp);
2986 if (error != 0) {
2987 VOP_UNLOCK1(sdvp);
2988 VOP_UNLOCK1(tdvp);
2989 goto out;
2990 }
2991 svp = ZTOV(szp);
2992 tvp = tzp != NULL ? ZTOV(tzp) : NULL;
2993
2994 /*
2995 * Now try to acquire locks on svp and tvp.
2996 */
2997 nvp = svp;
2998 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
2999 if (error != 0) {
3000 VOP_UNLOCK1(sdvp);
3001 VOP_UNLOCK1(tdvp);
3002 if (tvp != NULL)
3003 vrele(tvp);
3004 if (error != EBUSY) {
3005 vrele(nvp);
3006 goto out;
3007 }
3008 error = vn_lock(nvp, LK_EXCLUSIVE);
3009 if (error != 0) {
3010 vrele(nvp);
3011 goto out;
3012 }
3013 VOP_UNLOCK1(nvp);
3014 /*
3015 * Concurrent rename race.
3016 * XXX ?
3017 */
3018 if (nvp == tdvp) {
3019 vrele(nvp);
3020 error = SET_ERROR(EINVAL);
3021 goto out;
3022 }
3023 vrele(*svpp);
3024 *svpp = nvp;
3025 goto relock;
3026 }
3027 vrele(*svpp);
3028 *svpp = nvp;
3029
3030 if (*tvpp != NULL)
3031 vrele(*tvpp);
3032 *tvpp = NULL;
3033 if (tvp != NULL) {
3034 nvp = tvp;
3035 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
3036 if (error != 0) {
3037 VOP_UNLOCK1(sdvp);
3038 VOP_UNLOCK1(tdvp);
3039 VOP_UNLOCK1(*svpp);
3040 if (error != EBUSY) {
3041 vrele(nvp);
3042 goto out;
3043 }
3044 error = vn_lock(nvp, LK_EXCLUSIVE);
3045 if (error != 0) {
3046 vrele(nvp);
3047 goto out;
3048 }
3049 vput(nvp);
3050 goto relock;
3051 }
3052 *tvpp = nvp;
3053 }
3054
3055 return (0);
3056
3057 out:
3058 return (error);
3059 }
3060
3061 /*
3062 * Note that we must use VRELE_ASYNC in this function as it walks
3063 * up the directory tree and vrele may need to acquire an exclusive
3064 * lock if a last reference to a vnode is dropped.
3065 */
3066 static int
3067 zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
3068 {
3069 zfsvfs_t *zfsvfs;
3070 znode_t *zp, *zp1;
3071 uint64_t parent;
3072 int error;
3073
3074 zfsvfs = tdzp->z_zfsvfs;
3075 if (tdzp == szp)
3076 return (SET_ERROR(EINVAL));
3077 if (tdzp == sdzp)
3078 return (0);
3079 if (tdzp->z_id == zfsvfs->z_root)
3080 return (0);
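/*
 * Walk up from tdzp via the SA parent pointers.  Finding szp on the
 * way up would make the target a descendant of the source, so the
 * rename is rejected with EINVAL.
 */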
3081 zp = tdzp;
3082 for (;;) {
3083 ASSERT(!zp->z_unlinked);
3084 if ((error = sa_lookup(zp->z_sa_hdl,
3085 SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
3086 break;
3087
3088 if (parent == szp->z_id) {
3089 error = SET_ERROR(EINVAL);
3090 break;
3091 }
3092 if (parent == zfsvfs->z_root)
3093 break;
3094 if (parent == sdzp->z_id)
3095 break;
3096
3097 error = zfs_zget(zfsvfs, parent, &zp1);
3098 if (error != 0)
3099 break;
3100
3101 if (zp != tdzp)
3102 VN_RELE_ASYNC(ZTOV(zp),
3103 dsl_pool_zrele_taskq(
3104 dmu_objset_pool(zfsvfs->z_os)));
3105 zp = zp1;
3106 }
3107
3108 if (error == ENOTDIR)
3109 panic("checkpath: .. not a directory\n");
3110 if (zp != tdzp)
3111 VN_RELE_ASYNC(ZTOV(zp),
3112 dsl_pool_zrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
3113 return (error);
3114 }
3115
3116 #if __FreeBSD_version < 1300124
3117 static void
3118 cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
3119 struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
3120 {
3121
3122 cache_purge(fvp);
3123 if (tvp != NULL)
3124 cache_purge(tvp);
3125 cache_purge_negative(tdvp);
3126 }
3127 #endif
3128
3129 static int
3130 zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
3131 vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
3132 cred_t *cr);
3133
3134 /*
3135 * Move an entry from the provided source directory to the target
3136 * directory. Change the entry name as indicated.
3137 *
3138 * IN: sdvp - Source directory containing the "old entry".
3139 * scnp - Old entry name.
3140 * tdvp - Target directory to contain the "new entry".
3141 * tcnp - New entry name.
3142 * cr - credentials of caller.
3143 * INOUT: svpp - Source file
3144 * tvpp - Target file, may point to NULL initially
3145 *
3146 * RETURN: 0 on success, error code on failure.
3147 *
3148 * Timestamps:
3149 * sdvp,tdvp - ctime|mtime updated
3150 */
3151 static int
3152 zfs_do_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
3153 vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
3154 cred_t *cr)
3155 {
3156 int error;
3157
3158 ASSERT_VOP_ELOCKED(tdvp, __func__);
3159 if (*tvpp != NULL)
3160 ASSERT_VOP_ELOCKED(*tvpp, __func__);
3161
3162 /* Reject renames across filesystems. */
3163 if ((*svpp)->v_mount != tdvp->v_mount ||
3164 ((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
3165 error = SET_ERROR(EXDEV);
3166 goto out;
3167 }
3168
3169 if (zfsctl_is_node(tdvp)) {
3170 error = SET_ERROR(EXDEV);
3171 goto out;
3172 }
3173
3174 /*
3175 * Lock all four vnodes to ensure safety and semantics of renaming.
3176 */
3177 error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
3178 if (error != 0) {
3179 /* no vnodes are locked in the case of error here */
3180 return (error);
3181 }
3182
3183 error = zfs_do_rename_impl(sdvp, svpp, scnp, tdvp, tvpp, tcnp, cr);
3184 VOP_UNLOCK1(sdvp);
3185 VOP_UNLOCK1(*svpp);
3186 out:
3187 if (*tvpp != NULL)
3188 VOP_UNLOCK1(*tvpp);
3189 if (tdvp != *tvpp)
3190 VOP_UNLOCK1(tdvp);
3191
3192 return (error);
3193 }
3194
3195 static int
3196 zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
3197 vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
3198 cred_t *cr)
3199 {
3200 dmu_tx_t *tx;
3201 zfsvfs_t *zfsvfs;
3202 zilog_t *zilog;
3203 znode_t *tdzp, *sdzp, *tzp, *szp;
3204 const char *snm = scnp->cn_nameptr;
3205 const char *tnm = tcnp->cn_nameptr;
3206 int error;
3207
3208 tdzp = VTOZ(tdvp);
3209 sdzp = VTOZ(sdvp);
3210 zfsvfs = tdzp->z_zfsvfs;
3211
3212 if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
3213 return (error);
3214 if ((error = zfs_verify_zp(sdzp)) != 0) {
3215 zfs_exit(zfsvfs, FTAG);
3216 return (error);
3217 }
3218 zilog = zfsvfs->z_log;
3219
3220 if (zfsvfs->z_utf8 && u8_validate(tnm,
3221 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3222 error = SET_ERROR(EILSEQ);
3223 goto out;
3224 }
3225
3226 /* If source and target are the same file, there is nothing to do. */
3227 if ((*svpp) == (*tvpp)) {
3228 error = 0;
3229 goto out;
3230 }
3231
3232 if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
3233 ((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
3234 (*tvpp)->v_mountedhere != NULL)) {
3235 error = SET_ERROR(EXDEV);
3236 goto out;
3237 }
3238
3239 szp = VTOZ(*svpp);
3240 if ((error = zfs_verify_zp(szp)) != 0) {
3241 zfs_exit(zfsvfs, FTAG);
3242 return (error);
3243 }
3244 tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
3245 if (tzp != NULL) {
3246 if ((error = zfs_verify_zp(tzp)) != 0) {
3247 zfs_exit(zfsvfs, FTAG);
3248 return (error);
3249 }
3250 }
3251
3252 /*
3253 * This is to prevent the creation of links into attribute space
3254 * by renaming a linked file into/out of an attribute directory.
3255 * See the comment in zfs_link() for why this is considered bad.
3256 */
3257 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3258 error = SET_ERROR(EINVAL);
3259 goto out;
3260 }
3261
3262 /*
3263 * If we are using project inheritance, meaning the directory has
3264 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3265 * not only the project ID but also the ZFS_PROJINHERIT flag. In
3266 * that case, we only allow renames into our tree when the project
3267 * IDs are the same.
3268 */
3269 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3270 tdzp->z_projid != szp->z_projid) {
3271 error = SET_ERROR(EXDEV);
3272 goto out;
3273 }
3274
3275 /*
3276 * Must have write access at the source to remove the old entry
3277 * and write access at the target to create the new entry.
3278 * Note that if target and source are the same, this can be
3279 * done in a single check.
3280 */
3281 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr, NULL)))
3282 goto out;
3283
3284 if ((*svpp)->v_type == VDIR) {
3285 /*
3286 * Avoid ".", "..", and aliases of "." for obvious reasons.
3287 */
3288 if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
3289 sdzp == szp ||
3290 (scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
3291 error = EINVAL;
3292 goto out;
3293 }
3294
3295 /*
3296 * Check to make sure rename is valid.
3297 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3298 */
3299 if ((error = zfs_rename_check(szp, sdzp, tdzp)))
3300 goto out;
3301 }
3302
3303 /*
3304 * Does target exist?
3305 */
3306 if (tzp) {
3307 /*
3308 * Source and target must be the same type.
3309 */
3310 if ((*svpp)->v_type == VDIR) {
3311 if ((*tvpp)->v_type != VDIR) {
3312 error = SET_ERROR(ENOTDIR);
3313 goto out;
3314 } else {
3315 cache_purge(tdvp);
3316 if (sdvp != tdvp)
3317 cache_purge(sdvp);
3318 }
3319 } else {
3320 if ((*tvpp)->v_type == VDIR) {
3321 error = SET_ERROR(EISDIR);
3322 goto out;
3323 }
3324 }
3325 }
3326
3327 vn_seqc_write_begin(*svpp);
3328 vn_seqc_write_begin(sdvp);
3329 if (*tvpp != NULL)
3330 vn_seqc_write_begin(*tvpp);
3331 if (tdvp != *tvpp)
3332 vn_seqc_write_begin(tdvp);
3333
3334 vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
3335 if (tzp)
3336 vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
3337
3338 /*
3339 * Notify the target directory if it is not the same
3340 * as the source directory.
3341 */
3342 if (tdvp != sdvp) {
3343 vnevent_rename_dest_dir(tdvp, ct);
3344 }
3345
3346 tx = dmu_tx_create(zfsvfs->z_os);
3347 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3348 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3349 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3350 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3351 if (sdzp != tdzp) {
3352 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3353 zfs_sa_upgrade_txholds(tx, tdzp);
3354 }
3355 if (tzp) {
3356 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3357 zfs_sa_upgrade_txholds(tx, tzp);
3358 }
3359
3360 zfs_sa_upgrade_txholds(tx, szp);
3361 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3362 error = dmu_tx_assign(tx, TXG_WAIT);
3363 if (error) {
3364 dmu_tx_abort(tx);
3365 goto out_seq;
3366 }
3367
3368 if (tzp) /* Attempt to remove the existing target */
3369 error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
3370
3371 if (error == 0) {
3372 error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
3373 if (error == 0) {
3374 szp->z_pflags |= ZFS_AV_MODIFIED;
3375
3376 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3377 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3378 ASSERT0(error);
3379
3380 error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
3381 NULL);
3382 if (error == 0) {
3383 zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
3384 snm, tdzp, tnm, szp);
3385 } else {
3386 /*
3387 * At this point, we have successfully created
3388 * the target name, but have failed to remove
3389 * the source name. Since the create was done
3390 * with the ZRENAMING flag, there are
3391 * complications; for one, the link count is
3392 * wrong. The easiest way to deal with this
3393 * is to remove the newly created target, and
3394 * return the original error. This must
3395 * succeed; fortunately, it is very unlikely to
3396 * fail, since we just created it.
3397 */
3398 VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
3399 ZRENAMING, NULL));
3400 }
3401 }
3402 if (error == 0) {
3403 cache_vop_rename(sdvp, *svpp, tdvp, *tvpp, scnp, tcnp);
3404 }
3405 }
3406
3407 dmu_tx_commit(tx);
3408
3409 out_seq:
3410 vn_seqc_write_end(*svpp);
3411 vn_seqc_write_end(sdvp);
3412 if (*tvpp != NULL)
3413 vn_seqc_write_end(*tvpp);
3414 if (tdvp != *tvpp)
3415 vn_seqc_write_end(tdvp);
3416
3417 out:
3418 if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3419 zil_commit(zilog, 0);
3420 zfs_exit(zfsvfs, FTAG);
3421
3422 return (error);
3423 }
3424
3425 int
3426 zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
3427 cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
3428 {
3429 struct componentname scn, tcn;
3430 vnode_t *sdvp, *tdvp;
3431 vnode_t *svp, *tvp;
3432 int error;
3433 svp = tvp = NULL;
3434
3435 if (rflags != 0 || wo_vap != NULL)
3436 return (SET_ERROR(EINVAL));
3437
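/*
 * Resolve both names to vnodes and componentnames, then hand off to
 * zfs_do_rename(), which takes care of the four-way vnode locking.
 */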
3438 sdvp = ZTOV(sdzp);
3439 tdvp = ZTOV(tdzp);
3440 error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
3441 if (sdzp->z_zfsvfs->z_replay == B_FALSE)
3442 VOP_UNLOCK1(sdvp);
3443 if (error != 0)
3444 goto fail;
3445 VOP_UNLOCK1(svp);
3446
3447 vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
3448 error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
3449 if (error == EJUSTRETURN)
3450 tvp = NULL;
3451 else if (error != 0) {
3452 VOP_UNLOCK1(tdvp);
3453 goto fail;
3454 }
3455
3456 error = zfs_do_rename(sdvp, &svp, &scn, tdvp, &tvp, &tcn, cr);
3457 fail:
3458 if (svp != NULL)
3459 vrele(svp);
3460 if (tvp != NULL)
3461 vrele(tvp);
3462
3463 return (error);
3464 }
3465
3466 /*
3467 * Insert the indicated symbolic reference entry into the directory.
3468 *
3469 * IN: dzp - Directory to contain new symbolic link.
3470 * name - Name of the directory entry in dzp.
3471 * vap - Attributes of new entry.
3472 * link - Target path of the symlink.
3473 * cr - credentials of caller.
3474 * flags - case flags
3475 * mnt_ns - Unused on FreeBSD
3476 *
3477 * RETURN: 0 on success, error code on failure.
3478 *
3479 * Timestamps:
3480 * dvp - ctime|mtime updated
3481 */
3482 int
3483 zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
3484 const char *link, znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
3485 {
3486 (void) flags;
3487 znode_t *zp;
3488 dmu_tx_t *tx;
3489 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
3490 zilog_t *zilog;
3491 uint64_t len = strlen(link);
3492 int error;
3493 zfs_acl_ids_t acl_ids;
3494 boolean_t fuid_dirtied;
3495 uint64_t txtype = TX_SYMLINK;
3496
3497 ASSERT3S(vap->va_type, ==, VLNK);
3498
3499 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
3500 return (error);
3501 zilog = zfsvfs->z_log;
3502
3503 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3504 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3505 zfs_exit(zfsvfs, FTAG);
3506 return (SET_ERROR(EILSEQ));
3507 }
3508
3509 if (len > MAXPATHLEN) {
3510 zfs_exit(zfsvfs, FTAG);
3511 return (SET_ERROR(ENAMETOOLONG));
3512 }
3513
3514 if ((error = zfs_acl_ids_create(dzp, 0,
3515 vap, cr, NULL, &acl_ids, NULL)) != 0) {
3516 zfs_exit(zfsvfs, FTAG);
3517 return (error);
3518 }
3519
3520 /*
3521 * Attempt to lock directory; fail if entry already exists.
3522 */
3523 error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
3524 if (error) {
3525 zfs_acl_ids_free(&acl_ids);
3526 zfs_exit(zfsvfs, FTAG);
3527 return (error);
3528 }
3529
3530 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
3531 zfs_acl_ids_free(&acl_ids);
3532 zfs_exit(zfsvfs, FTAG);
3533 return (error);
3534 }
3535
3536 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids,
3537 0 /* projid */)) {
3538 zfs_acl_ids_free(&acl_ids);
3539 zfs_exit(zfsvfs, FTAG);
3540 return (SET_ERROR(EDQUOT));
3541 }
3542
3543 getnewvnode_reserve_();
3544 tx = dmu_tx_create(zfsvfs->z_os);
3545 fuid_dirtied = zfsvfs->z_fuid_dirty;
3546 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3547 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3548 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3549 ZFS_SA_BASE_ATTR_SIZE + len);
3550 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3551 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3552 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3553 acl_ids.z_aclp->z_acl_bytes);
3554 }
3555 if (fuid_dirtied)
3556 zfs_fuid_txhold(zfsvfs, tx);
3557 error = dmu_tx_assign(tx, TXG_WAIT);
3558 if (error) {
3559 zfs_acl_ids_free(&acl_ids);
3560 dmu_tx_abort(tx);
3561 getnewvnode_drop_reserve();
3562 zfs_exit(zfsvfs, FTAG);
3563 return (error);
3564 }
3565
3566 /*
3567 * Create a new object for the symlink.
3568 * For version 4 ZPL datasets the symlink will be an SA attribute.
3569 */
3570 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3571
3572 if (fuid_dirtied)
3573 zfs_fuid_sync(zfsvfs, tx);
3574
3575 if (zp->z_is_sa)
3576 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3577 __DECONST(void *, link), len, tx);
3578 else
3579 zfs_sa_symlink(zp, __DECONST(char *, link), len, tx);
3580
3581 zp->z_size = len;
3582 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3583 &zp->z_size, sizeof (zp->z_size), tx);
3584 /*
3585 * Insert the new object into the directory.
3586 */
3587 (void) zfs_link_create(dzp, name, zp, tx, ZNEW);
3588
3589 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3590 *zpp = zp;
3591
3592 zfs_acl_ids_free(&acl_ids);
3593
3594 dmu_tx_commit(tx);
3595
3596 getnewvnode_drop_reserve();
3597
3598 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3599 zil_commit(zilog, 0);
3600
3601 zfs_exit(zfsvfs, FTAG);
3602 return (error);
3603 }
3604
3605 /*
3606 * Return, in the buffer contained in the provided uio structure,
3607 * the symbolic path referred to by vp.
3608 *
3609 * IN: vp - vnode of symbolic link.
3610 * uio - structure to contain the link path.
3611 * cr - credentials of caller.
3612 * ct - caller context
3613 *
3614 * OUT: uio - structure containing the link path.
3615 *
3616 * RETURN: 0 on success, error code on failure.
3617 *
3618 * Timestamps:
3619 * vp - atime updated
3620 */
3621 static int
3622 zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
3623 {
3624 (void) cr, (void) ct;
3625 znode_t *zp = VTOZ(vp);
3626 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3627 int error;
3628
3629 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3630 return (error);
3631
3632 if (zp->z_is_sa)
3633 error = sa_lookup_uio(zp->z_sa_hdl,
3634 SA_ZPL_SYMLINK(zfsvfs), uio);
3635 else
3636 error = zfs_sa_readlink(zp, uio);
3637
3638 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3639
3640 zfs_exit(zfsvfs, FTAG);
3641 return (error);
3642 }
3643
3644 /*
3645 * Insert a new entry into directory tdvp referencing svp.
3646 *
3647 * IN: tdvp - Directory to contain new entry.
3648 * svp - vnode of new entry.
3649 * name - name of new entry.
3650 * cr - credentials of caller.
3651 *
3652 * RETURN: 0 on success, error code on failure.
3653 *
3654 * Timestamps:
3655 * tdvp - ctime|mtime updated
3656 * svp - ctime updated
3657 */
3658 int
3659 zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
3660 int flags)
3661 {
3662 (void) flags;
3663 znode_t *tzp;
3664 zfsvfs_t *zfsvfs = tdzp->z_zfsvfs;
3665 zilog_t *zilog;
3666 dmu_tx_t *tx;
3667 int error;
3668 uint64_t parent;
3669 uid_t owner;
3670
3671 ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
3672
3673 if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
3674 return (error);
3675 zilog = zfsvfs->z_log;
3676
3677 /*
3678 * POSIX dictates that we return EPERM here.
3679 * Better choices include ENOTSUP or EISDIR.
3680 */
3681 if (ZTOV(szp)->v_type == VDIR) {
3682 zfs_exit(zfsvfs, FTAG);
3683 return (SET_ERROR(EPERM));
3684 }
3685
3686 if ((error = zfs_verify_zp(szp)) != 0) {
3687 zfs_exit(zfsvfs, FTAG);
3688 return (error);
3689 }
3690
3691 /*
3692 * If we are using project inheritance, meaning the directory has
3693 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3694 * not only the project ID but also the ZFS_PROJINHERIT flag. In
3695 * that case, we only allow hard link creation in our tree when the
3696 * project IDs are the same.
3697 */
3698 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3699 tdzp->z_projid != szp->z_projid) {
3700 zfs_exit(zfsvfs, FTAG);
3701 return (SET_ERROR(EXDEV));
3702 }
3703
3704 if (szp->z_pflags & (ZFS_APPENDONLY |
3705 ZFS_IMMUTABLE | ZFS_READONLY)) {
3706 zfs_exit(zfsvfs, FTAG);
3707 return (SET_ERROR(EPERM));
3708 }
3709
3710 /* Prevent links to .zfs/shares files */
3711
3712 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3713 &parent, sizeof (uint64_t))) != 0) {
3714 zfs_exit(zfsvfs, FTAG);
3715 return (error);
3716 }
3717 if (parent == zfsvfs->z_shares_dir) {
3718 zfs_exit(zfsvfs, FTAG);
3719 return (SET_ERROR(EPERM));
3720 }
3721
3722 if (zfsvfs->z_utf8 && u8_validate(name,
3723 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3724 zfs_exit(zfsvfs, FTAG);
3725 return (SET_ERROR(EILSEQ));
3726 }
3727
3728 /*
3729 * We do not support links between attributes and non-attributes
3730 * because of the potential security risk of creating links
3731 * into "normal" file space in order to circumvent restrictions
3732 * imposed in attribute space.
3733 */
3734 if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
3735 zfs_exit(zfsvfs, FTAG);
3736 return (SET_ERROR(EINVAL));
3737 }
3738
3739
3740 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
3741 if (owner != crgetuid(cr) && secpolicy_basic_link(ZTOV(szp), cr) != 0) {
3742 zfs_exit(zfsvfs, FTAG);
3743 return (SET_ERROR(EPERM));
3744 }
3745
3746 if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr, NULL))) {
3747 zfs_exit(zfsvfs, FTAG);
3748 return (error);
3749 }
3750
3751 /*
3752 * Attempt to lock directory; fail if entry already exists.
3753 */
3754 error = zfs_dirent_lookup(tdzp, name, &tzp, ZNEW);
3755 if (error) {
3756 zfs_exit(zfsvfs, FTAG);
3757 return (error);
3758 }
3759
3760 tx = dmu_tx_create(zfsvfs->z_os);
3761 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3762 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
3763 zfs_sa_upgrade_txholds(tx, szp);
3764 zfs_sa_upgrade_txholds(tx, tdzp);
3765 error = dmu_tx_assign(tx, TXG_WAIT);
3766 if (error) {
3767 dmu_tx_abort(tx);
3768 zfs_exit(zfsvfs, FTAG);
3769 return (error);
3770 }
3771
3772 error = zfs_link_create(tdzp, name, szp, tx, 0);
3773
3774 if (error == 0) {
3775 uint64_t txtype = TX_LINK;
3776 zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
3777 }
3778
3779 dmu_tx_commit(tx);
3780
3781 if (error == 0) {
3782 vnevent_link(ZTOV(szp), ct);
3783 }
3784
3785 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3786 zil_commit(zilog, 0);
3787
3788 zfs_exit(zfsvfs, FTAG);
3789 return (error);
3790 }
3791
3792 /*
3793 * Free or allocate space in a file. Currently, this function only
3794 * supports the `F_FREESP' command. However, this command is somewhat
3795 * misnamed, as its functionality includes the ability to allocate as
3796 * well as free space.
3797 *
3798 * IN: ip - inode of file to free data in.
3799 * cmd - action to take (only F_FREESP supported).
3800 * bfp - section of file to free/alloc.
3801 * flag - current file open mode flags.
3802 * offset - current file offset.
3803 * cr - credentials of caller.
3804 *
3805 * RETURN: 0 on success, error code on failure.
3806 *
3807 * Timestamps:
3808 * ip - ctime|mtime updated
3809 */
3810 int
3811 zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
3812 offset_t offset, cred_t *cr)
3813 {
3814 (void) offset;
3815 zfsvfs_t *zfsvfs = ZTOZSB(zp);
3816 uint64_t off, len;
3817 int error;
3818
3819 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3820 return (error);
3821
3822 if (cmd != F_FREESP) {
3823 zfs_exit(zfsvfs, FTAG);
3824 return (SET_ERROR(EINVAL));
3825 }
3826
3827 /*
3828 * Callers might not be able to detect properly that we are read-only,
3829 * so check it explicitly here.
3830 */
3831 if (zfs_is_readonly(zfsvfs)) {
3832 zfs_exit(zfsvfs, FTAG);
3833 return (SET_ERROR(EROFS));
3834 }
3835
3836 if (bfp->l_len < 0) {
3837 zfs_exit(zfsvfs, FTAG);
3838 return (SET_ERROR(EINVAL));
3839 }
3840
3841 /*
3842 * Permissions aren't checked on Solaris because on this OS
3843 * zfs_space() can only be called with an opened file handle.
3844 * On Linux we can get here through truncate_range() which
3845 * operates directly on inodes, so we need to check access rights.
3846 */
3847 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr, NULL))) {
3848 zfs_exit(zfsvfs, FTAG);
3849 return (error);
3850 }
3851
3852 off = bfp->l_start;
3853 len = bfp->l_len; /* 0 means from off to end of file */
3854
3855 error = zfs_freesp(zp, off, len, flag, TRUE);
3856
3857 zfs_exit(zfsvfs, FTAG);
3858 return (error);
3859 }
3860
3861 static void
3862 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
3863 {
3864 (void) cr, (void) ct;
3865 znode_t *zp = VTOZ(vp);
3866 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3867 int error;
3868
3869 ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
3870 if (zp->z_sa_hdl == NULL) {
3871 /*
3872 * The fs has been unmounted, or we did a
3873 * suspend/resume and this file no longer exists.
3874 */
3875 ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
3876 vrecycle(vp);
3877 return;
3878 }
3879
3880 if (zp->z_unlinked) {
3881 /*
3882 * Fast path to recycle a vnode of a removed file.
3883 */
3884 ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
3885 vrecycle(vp);
3886 return;
3887 }
3888
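/*
 * If the in-core atime is dirty, push it to the SA attribute before the
 * vnode is recycled.
 */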
3889 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3890 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
3891
3892 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3893 zfs_sa_upgrade_txholds(tx, zp);
3894 error = dmu_tx_assign(tx, TXG_WAIT);
3895 if (error) {
3896 dmu_tx_abort(tx);
3897 } else {
3898 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
3899 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
3900 zp->z_atime_dirty = 0;
3901 dmu_tx_commit(tx);
3902 }
3903 }
3904 ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
3905 }
3906
3907
3908 _Static_assert(sizeof (struct zfid_short) <= sizeof (struct fid),
3909 "struct zfid_short bigger than struct fid");
3910 _Static_assert(sizeof (struct zfid_long) <= sizeof (struct fid),
3911 "struct zfid_long bigger than struct fid");
3912
3913 static int
3914 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3915 {
3916 (void) ct;
3917 znode_t *zp = VTOZ(vp);
3918 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3919 uint32_t gen;
3920 uint64_t gen64;
3921 uint64_t object = zp->z_id;
3922 zfid_short_t *zfid;
3923 int size, i, error;
3924
3925 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3926 return (error);
3927
3928 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
3929 &gen64, sizeof (uint64_t))) != 0) {
3930 zfs_exit(zfsvfs, FTAG);
3931 return (error);
3932 }
3933
3934 gen = (uint32_t)gen64;
3935
3936 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
3937 fidp->fid_len = size;
3938
3939 zfid = (zfid_short_t *)fidp;
3940
3941 zfid->zf_len = size;
3942
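/* Encode the object number and generation as little-endian byte arrays. */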
3943 for (i = 0; i < sizeof (zfid->zf_object); i++)
3944 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
3945
3946 /* Must have a non-zero generation number to distinguish from .zfs */
3947 if (gen == 0)
3948 gen = 1;
3949 for (i = 0; i < sizeof (zfid->zf_gen); i++)
3950 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
3951
3952 if (size == LONG_FID_LEN) {
3953 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
3954 zfid_long_t *zlfid;
3955
3956 zlfid = (zfid_long_t *)fidp;
3957
3958 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
3959 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
3960
3961 /* XXX - this should be the generation number for the objset */
3962 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
3963 zlfid->zf_setgen[i] = 0;
3964 }
3965
3966 zfs_exit(zfsvfs, FTAG);
3967 return (0);
3968 }
3969
3970 static int
3971 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
3972 caller_context_t *ct)
3973 {
3974 znode_t *zp;
3975 zfsvfs_t *zfsvfs;
3976 int error;
3977
3978 switch (cmd) {
3979 case _PC_LINK_MAX:
3980 *valp = MIN(LONG_MAX, ZFS_LINK_MAX);
3981 return (0);
3982
3983 case _PC_FILESIZEBITS:
3984 *valp = 64;
3985 return (0);
3986 case _PC_MIN_HOLE_SIZE:
3987 *valp = (int)SPA_MINBLOCKSIZE;
3988 return (0);
3989 case _PC_ACL_EXTENDED:
3990 #if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
3991 zp = VTOZ(vp);
3992 zfsvfs = zp->z_zfsvfs;
3993 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3994 return (error);
3995 *valp = zfsvfs->z_acl_type == ZFSACLTYPE_POSIX ? 1 : 0;
3996 zfs_exit(zfsvfs, FTAG);
3997 #else
3998 *valp = 0;
3999 #endif
4000 return (0);
4001
4002 case _PC_ACL_NFS4:
4003 zp = VTOZ(vp);
4004 zfsvfs = zp->z_zfsvfs;
4005 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
4006 return (error);
4007 *valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4 ? 1 : 0;
4008 zfs_exit(zfsvfs, FTAG);
4009 return (0);
4010
4011 case _PC_ACL_PATH_MAX:
4012 *valp = ACL_MAX_ENTRIES;
4013 return (0);
4014
4015 default:
4016 return (EOPNOTSUPP);
4017 }
4018 }
4019
4020 static int
4021 zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
4022 int *rahead)
4023 {
4024 znode_t *zp = VTOZ(vp);
4025 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4026 zfs_locked_range_t *lr;
4027 vm_object_t object;
4028 off_t start, end, obj_size;
4029 uint_t blksz;
4030 int pgsin_b, pgsin_a;
4031 int error;
4032
4033 if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
4034 return (zfs_vm_pagerret_error);
4035
4036 start = IDX_TO_OFF(ma[0]->pindex);
4037 end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
4038
4039 /*
4040 * Lock a range covering all required and optional pages.
4041 * Note that we need to handle the case of the block size growing.
4042 */
4043 for (;;) {
4044 blksz = zp->z_blksz;
4045 lr = zfs_rangelock_tryenter(&zp->z_rangelock,
4046 rounddown(start, blksz),
4047 roundup(end, blksz) - rounddown(start, blksz), RL_READER);
4048 if (lr == NULL) {
4049 if (rahead != NULL) {
4050 *rahead = 0;
4051 rahead = NULL;
4052 }
4053 if (rbehind != NULL) {
4054 *rbehind = 0;
4055 rbehind = NULL;
4056 }
4057 break;
4058 }
4059 if (blksz == zp->z_blksz)
4060 break;
4061 zfs_rangelock_exit(lr);
4062 }
4063
4064 object = ma[0]->object;
4065 zfs_vmobject_wlock(object);
4066 obj_size = object->un_pager.vnp.vnp_size;
4067 zfs_vmobject_wunlock(object);
4068 if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
4069 if (lr != NULL)
4070 zfs_rangelock_exit(lr);
4071 zfs_exit(zfsvfs, FTAG);
4072 return (zfs_vm_pagerret_bad);
4073 }
4074
4075 pgsin_b = 0;
4076 if (rbehind != NULL) {
4077 pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
4078 pgsin_b = MIN(*rbehind, pgsin_b);
4079 }
4080
4081 pgsin_a = 0;
4082 if (rahead != NULL) {
4083 pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
4084 if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
4085 pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
4086 pgsin_a = MIN(*rahead, pgsin_a);
4087 }
4088
4089 /*
4090 * NB: we need to pass the exact byte size of the data that we expect
4091 * to read after accounting for the file size. This is required because
4092 * ZFS will panic if we request the DMU to read beyond the end of the last
4093 * allocated block.
4094 */
4095 error = dmu_read_pages(zfsvfs->z_os, zp->z_id, ma, count, &pgsin_b,
4096 &pgsin_a, MIN(end, obj_size) - (end - PAGE_SIZE));
4097
4098 if (lr != NULL)
4099 zfs_rangelock_exit(lr);
4100 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4101
4102 dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, count * PAGE_SIZE);
4103
4104 zfs_exit(zfsvfs, FTAG);
4105
4106 if (error != 0)
4107 return (zfs_vm_pagerret_error);
4108
4109 VM_CNT_INC(v_vnodein);
4110 VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
4111 if (rbehind != NULL)
4112 *rbehind = pgsin_b;
4113 if (rahead != NULL)
4114 *rahead = pgsin_a;
4115 return (zfs_vm_pagerret_ok);
4116 }
4117
4118 #ifndef _SYS_SYSPROTO_H_
4119 struct vop_getpages_args {
4120 struct vnode *a_vp;
4121 vm_page_t *a_m;
4122 int a_count;
4123 int *a_rbehind;
4124 int *a_rahead;
4125 };
4126 #endif
4127
4128 static int
4129 zfs_freebsd_getpages(struct vop_getpages_args *ap)
4130 {
4131
4132 return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
4133 ap->a_rahead));
4134 }
4135
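/*
 * VOP_PUTPAGES worker: write dirty pages back to the DMU under a write
 * range lock and a transaction, update mtime/ctime, log the write to the
 * ZIL, and undirty the pages that were successfully written.
 */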
4136 static int
4137 zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
4138 int *rtvals)
4139 {
4140 znode_t *zp = VTOZ(vp);
4141 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4142 zfs_locked_range_t *lr;
4143 dmu_tx_t *tx;
4144 struct sf_buf *sf;
4145 vm_object_t object;
4146 vm_page_t m;
4147 caddr_t va;
4148 size_t tocopy;
4149 size_t lo_len;
4150 vm_ooffset_t lo_off;
4151 vm_ooffset_t off;
4152 uint_t blksz;
4153 int ncount;
4154 int pcount;
4155 int err;
4156 int i;
4157
4158 object = vp->v_object;
4159 KASSERT(ma[0]->object == object, ("mismatching object"));
4160 KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
4161
4162 pcount = btoc(len);
4163 ncount = pcount;
4164 for (i = 0; i < pcount; i++)
4165 rtvals[i] = zfs_vm_pagerret_error;
4166
4167 if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
4168 return (zfs_vm_pagerret_error);
4169
4170 off = IDX_TO_OFF(ma[0]->pindex);
4171 blksz = zp->z_blksz;
4172 lo_off = rounddown(off, blksz);
4173 lo_len = roundup(len + (off - lo_off), blksz);
4174 lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
4175
4176 zfs_vmobject_wlock(object);
4177 if (len + off > object->un_pager.vnp.vnp_size) {
4178 if (object->un_pager.vnp.vnp_size > off) {
4179 int pgoff;
4180
4181 len = object->un_pager.vnp.vnp_size - off;
4182 ncount = btoc(len);
4183 if ((pgoff = (int)len & PAGE_MASK) != 0) {
4184 /*
4185 * If the object is locked and the following
4186 * conditions hold, then the page's dirty
4187 * field cannot be concurrently changed by a
4188 * pmap operation.
4189 */
4190 m = ma[ncount - 1];
4191 vm_page_assert_sbusied(m);
4192 KASSERT(!pmap_page_is_write_mapped(m),
4193 ("zfs_putpages: page %p is not read-only",
4194 m));
4195 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
4196 pgoff);
4197 }
4198 } else {
4199 len = 0;
4200 ncount = 0;
4201 }
4202 if (ncount < pcount) {
4203 for (i = ncount; i < pcount; i++) {
4204 rtvals[i] = zfs_vm_pagerret_bad;
4205 }
4206 }
4207 }
4208 zfs_vmobject_wunlock(object);
4209
4210 boolean_t commit = (flags & (zfs_vm_pagerput_sync |
4211 zfs_vm_pagerput_inval)) != 0 ||
4212 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS;
4213
4214 if (ncount == 0)
4215 goto out;
4216
4217 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, zp->z_uid) ||
4218 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, zp->z_gid) ||
4219 (zp->z_projid != ZFS_DEFAULT_PROJID &&
4220 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
4221 zp->z_projid))) {
4222 goto out;
4223 }
4224
4225 tx = dmu_tx_create(zfsvfs->z_os);
4226 dmu_tx_hold_write(tx, zp->z_id, off, len);
4227
4228 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4229 zfs_sa_upgrade_txholds(tx, zp);
4230 err = dmu_tx_assign(tx, TXG_WAIT);
4231 if (err != 0) {
4232 dmu_tx_abort(tx);
4233 goto out;
4234 }
4235
4236 if (zp->z_blksz < PAGE_SIZE) {
4237 for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
4238 tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
4239 va = zfs_map_page(ma[i], &sf);
4240 dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
4241 zfs_unmap_page(sf);
4242 }
4243 } else {
4244 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
4245 }
4246
4247 if (err == 0) {
4248 uint64_t mtime[2], ctime[2];
4249 sa_bulk_attr_t bulk[3];
4250 int count = 0;
4251
4252 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4253 &mtime, 16);
4254 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4255 &ctime, 16);
4256 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4257 &zp->z_pflags, 8);
4258 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
4259 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
4260 ASSERT0(err);
4261 /*
4262 * XXX we should be passing a callback to undirty
4263 * but that would make the locking messier
4264 */
4265 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off,
4266 len, commit, NULL, NULL);
4267
4268 zfs_vmobject_wlock(object);
4269 for (i = 0; i < ncount; i++) {
4270 rtvals[i] = zfs_vm_pagerret_ok;
4271 vm_page_undirty(ma[i]);
4272 }
4273 zfs_vmobject_wunlock(object);
4274 VM_CNT_INC(v_vnodeout);
4275 VM_CNT_ADD(v_vnodepgsout, ncount);
4276 }
4277 dmu_tx_commit(tx);
4278
4279 out:
4280 zfs_rangelock_exit(lr);
4281 if (commit)
4282 zil_commit(zfsvfs->z_log, zp->z_id);
4283
4284 dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
4285
4286 zfs_exit(zfsvfs, FTAG);
4287 return (rtvals[0]);
4288 }
4289
4290 #ifndef _SYS_SYSPROTO_H_
4291 struct vop_putpages_args {
4292 struct vnode *a_vp;
4293 vm_page_t *a_m;
4294 int a_count;
4295 int a_sync;
4296 int *a_rtvals;
4297 };
4298 #endif
4299
4300 static int
4301 zfs_freebsd_putpages(struct vop_putpages_args *ap)
4302 {
4303
4304 return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
4305 ap->a_rtvals));
4306 }
4307
4308 #ifndef _SYS_SYSPROTO_H_
4309 struct vop_bmap_args {
4310 struct vnode *a_vp;
4311 daddr_t a_bn;
4312 struct bufobj **a_bop;
4313 daddr_t *a_bnp;
4314 int *a_runp;
4315 int *a_runb;
4316 };
4317 #endif
4318
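/*
 * Trivial VOP_BMAP: report an identity logical-to-physical mapping with no
 * read-ahead/read-behind runs, since ZFS does its own I/O aggregation rather
 * than clustering through the buffer cache.
 */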
4319 static int
4320 zfs_freebsd_bmap(struct vop_bmap_args *ap)
4321 {
4322
4323 if (ap->a_bop != NULL)
4324 *ap->a_bop = &ap->a_vp->v_bufobj;
4325 if (ap->a_bnp != NULL)
4326 *ap->a_bnp = ap->a_bn;
4327 if (ap->a_runp != NULL)
4328 *ap->a_runp = 0;
4329 if (ap->a_runb != NULL)
4330 *ap->a_runb = 0;
4331
4332 return (0);
4333 }
4334
4335 #ifndef _SYS_SYSPROTO_H_
4336 struct vop_open_args {
4337 struct vnode *a_vp;
4338 int a_mode;
4339 struct ucred *a_cred;
4340 struct thread *a_td;
4341 };
4342 #endif
4343
4344 static int
4345 zfs_freebsd_open(struct vop_open_args *ap)
4346 {
4347 vnode_t *vp = ap->a_vp;
4348 znode_t *zp = VTOZ(vp);
4349 int error;
4350
4351 error = zfs_open(&vp, ap->a_mode, ap->a_cred);
4352 if (error == 0)
4353 vnode_create_vobject(vp, zp->z_size, ap->a_td);
4354 return (error);
4355 }
4356
4357 #ifndef _SYS_SYSPROTO_H_
4358 struct vop_close_args {
4359 struct vnode *a_vp;
4360 int a_fflag;
4361 struct ucred *a_cred;
4362 struct thread *a_td;
4363 };
4364 #endif
4365
4366 static int
4367 zfs_freebsd_close(struct vop_close_args *ap)
4368 {
4369
4370 return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred));
4371 }
4372
4373 #ifndef _SYS_SYSPROTO_H_
4374 struct vop_ioctl_args {
4375 struct vnode *a_vp;
4376 ulong_t a_command;
4377 caddr_t a_data;
4378 int a_fflag;
4379 struct ucred *a_cred;
4380 struct thread *a_td;
4381 };
4382 #endif
4383
4384 static int
4385 zfs_freebsd_ioctl(struct vop_ioctl_args *ap)
4386 {
4387
4388 return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
4389 ap->a_fflag, ap->a_cred, NULL));
4390 }
4391
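/*
 * Translate FreeBSD IO_* flags from a VOP into the O_* flags expected by
 * the common zfs_read()/zfs_write() code.
 */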
4392 static int
4393 ioflags(int ioflags)
4394 {
4395 int flags = 0;
4396
4397 if (ioflags & IO_APPEND)
4398 flags |= O_APPEND;
4399 if (ioflags & IO_NDELAY)
4400 flags |= O_NONBLOCK;
4401 if (ioflags & IO_SYNC)
4402 flags |= O_SYNC;
4403
4404 return (flags);
4405 }
4406
4407 #ifndef _SYS_SYSPROTO_H_
4408 struct vop_read_args {
4409 struct vnode *a_vp;
4410 struct uio *a_uio;
4411 int a_ioflag;
4412 struct ucred *a_cred;
4413 };
4414 #endif
4415
4416 static int
4417 zfs_freebsd_read(struct vop_read_args *ap)
4418 {
4419 zfs_uio_t uio;
4420 zfs_uio_init(&uio, ap->a_uio);
4421 return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
4422 ap->a_cred));
4423 }
4424
4425 #ifndef _SYS_SYSPROTO_H_
4426 struct vop_write_args {
4427 struct vnode *a_vp;
4428 struct uio *a_uio;
4429 int a_ioflag;
4430 struct ucred *a_cred;
4431 };
4432 #endif
4433
4434 static int
4435 zfs_freebsd_write(struct vop_write_args *ap)
4436 {
4437 zfs_uio_t uio;
4438 zfs_uio_init(&uio, ap->a_uio);
4439 return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
4440 ap->a_cred));
4441 }
4442
4443 #if __FreeBSD_version >= 1300102
4444 /*
4445 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
4446 * the comment above cache_fplookup for details.
4447 */
4448 static int
4449 zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
4450 {
4451 vnode_t *vp;
4452 znode_t *zp;
4453 uint64_t pflags;
4454
4455 vp = v->a_vp;
4456 zp = VTOZ_SMR(vp);
4457 if (__predict_false(zp == NULL))
4458 return (EAGAIN);
4459 pflags = atomic_load_64(&zp->z_pflags);
4460 if (pflags & ZFS_AV_QUARANTINED)
4461 return (EAGAIN);
4462 if (pflags & ZFS_XATTR)
4463 return (EAGAIN);
4464 if ((pflags & ZFS_NO_EXECS_DENIED) == 0)
4465 return (EAGAIN);
4466 return (0);
4467 }
4468 #endif
4469
4470 #if __FreeBSD_version >= 1300139
4471 static int
4472 zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
4473 {
4474 vnode_t *vp;
4475 znode_t *zp;
4476 char *target;
4477
4478 vp = v->a_vp;
4479 zp = VTOZ_SMR(vp);
4480 if (__predict_false(zp == NULL)) {
4481 return (EAGAIN);
4482 }
4483
4484 target = atomic_load_consume_ptr(&zp->z_cached_symlink);
4485 if (target == NULL) {
4486 return (EAGAIN);
4487 }
4488 return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
4489 }
4490 #endif
4491
4492 #ifndef _SYS_SYSPROTO_H_
4493 struct vop_access_args {
4494 struct vnode *a_vp;
4495 accmode_t a_accmode;
4496 struct ucred *a_cred;
4497 struct thread *a_td;
4498 };
4499 #endif
4500
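/*
 * VOP_ACCESS: try the fast execute-check path first, let zfs_access()
 * handle VREAD/VWRITE/VEXEC/VAPPEND, and fall back to vaccess() for the
 * remaining bits such as VADMIN.
 */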
4501 static int
4502 zfs_freebsd_access(struct vop_access_args *ap)
4503 {
4504 vnode_t *vp = ap->a_vp;
4505 znode_t *zp = VTOZ(vp);
4506 accmode_t accmode;
4507 int error = 0;
4508
4509
4510 if (ap->a_accmode == VEXEC) {
4511 if (zfs_fastaccesschk_execute(zp, ap->a_cred) == 0)
4512 return (0);
4513 }
4514
4515 /*
4516 * ZFS itself only knows about VREAD, VWRITE, VEXEC, and VAPPEND.
4517 */
4518 accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
4519 if (accmode != 0)
4520 error = zfs_access(zp, accmode, 0, ap->a_cred);
4521
4522 /*
4523 * VADMIN has to be handled by vaccess().
4524 */
4525 if (error == 0) {
4526 accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
4527 if (accmode != 0) {
4528 #if __FreeBSD_version >= 1300105
4529 error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
4530 zp->z_gid, accmode, ap->a_cred);
4531 #else
4532 error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
4533 zp->z_gid, accmode, ap->a_cred, NULL);
4534 #endif
4535 }
4536 }
4537
4538 /*
4539 * For VEXEC, ensure that at least one execute bit is set for
4540 * non-directories.
4541 */
4542 if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
4543 (zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
4544 error = EACCES;
4545 }
4546
4547 return (error);
4548 }
4549
4550 #ifndef _SYS_SYSPROTO_H_
4551 struct vop_lookup_args {
4552 struct vnode *a_dvp;
4553 struct vnode **a_vpp;
4554 struct componentname *a_cnp;
4555 };
4556 #endif
4557
4558 static int
4559 zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
4560 {
4561 struct componentname *cnp = ap->a_cnp;
4562 char nm[NAME_MAX + 1];
4563
4564 ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
4565 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
4566
4567 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
4568 cnp->cn_cred, 0, cached));
4569 }
4570
4571 static int
4572 zfs_freebsd_cachedlookup(struct vop_cachedlookup_args *ap)
4573 {
4574
4575 return (zfs_freebsd_lookup((struct vop_lookup_args *)ap, B_TRUE));
4576 }
4577
4578 #ifndef _SYS_SYSPROTO_H_
4579 struct vop_lookup_args {
4580 struct vnode *a_dvp;
4581 struct vnode **a_vpp;
4582 struct componentname *a_cnp;
4583 };
4584 #endif
4585
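/*
 * Lookup entry point: use the FreeBSD name cache when it is enabled for
 * this dataset, otherwise go straight to an uncached zfs_lookup().
 */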
4586 static int
4587 zfs_cache_lookup(struct vop_lookup_args *ap)
4588 {
4589 zfsvfs_t *zfsvfs;
4590
4591 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4592 if (zfsvfs->z_use_namecache)
4593 return (vfs_cache_lookup(ap));
4594 else
4595 return (zfs_freebsd_lookup(ap, B_FALSE));
4596 }
4597
4598 #ifndef _SYS_SYSPROTO_H_
4599 struct vop_create_args {
4600 struct vnode *a_dvp;
4601 struct vnode **a_vpp;
4602 struct componentname *a_cnp;
4603 struct vattr *a_vap;
4604 };
4605 #endif
4606
4607 static int
4608 zfs_freebsd_create(struct vop_create_args *ap)
4609 {
4610 zfsvfs_t *zfsvfs;
4611 struct componentname *cnp = ap->a_cnp;
4612 vattr_t *vap = ap->a_vap;
4613 znode_t *zp = NULL;
4614 int rc, mode;
4615
4616 #if __FreeBSD_version < 1400068
4617 ASSERT(cnp->cn_flags & SAVENAME);
4618 #endif
4619
4620 vattr_init_mask(vap);
4621 mode = vap->va_mode & ALLPERMS;
4622 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4623 *ap->a_vpp = NULL;
4624
4625 rc = zfs_create(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap, 0, mode,
4626 &zp, cnp->cn_cred, 0 /* flag */, NULL /* vsecattr */, NULL);
4627 if (rc == 0)
4628 *ap->a_vpp = ZTOV(zp);
4629 if (zfsvfs->z_use_namecache &&
4630 rc == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
4631 cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
4632
4633 return (rc);
4634 }
4635
4636 #ifndef _SYS_SYSPROTO_H_
4637 struct vop_remove_args {
4638 struct vnode *a_dvp;
4639 struct vnode *a_vp;
4640 struct componentname *a_cnp;
4641 };
4642 #endif
4643
4644 static int
4645 zfs_freebsd_remove(struct vop_remove_args *ap)
4646 {
4647
4648 #if __FreeBSD_version < 1400068
4649 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
4650 #endif
4651
4652 return (zfs_remove_(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
4653 ap->a_cnp->cn_cred));
4654 }
4655
4656 #ifndef _SYS_SYSPROTO_H_
4657 struct vop_mkdir_args {
4658 struct vnode *a_dvp;
4659 struct vnode **a_vpp;
4660 struct componentname *a_cnp;
4661 struct vattr *a_vap;
4662 };
4663 #endif
4664
4665 static int
4666 zfs_freebsd_mkdir(struct vop_mkdir_args *ap)
4667 {
4668 vattr_t *vap = ap->a_vap;
4669 znode_t *zp = NULL;
4670 int rc;
4671
4672 #if __FreeBSD_version < 1400068
4673 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
4674 #endif
4675
4676 vattr_init_mask(vap);
4677 *ap->a_vpp = NULL;
4678
4679 rc = zfs_mkdir(VTOZ(ap->a_dvp), ap->a_cnp->cn_nameptr, vap, &zp,
4680 ap->a_cnp->cn_cred, 0, NULL, NULL);
4681
4682 if (rc == 0)
4683 *ap->a_vpp = ZTOV(zp);
4684 return (rc);
4685 }
4686
4687 #ifndef _SYS_SYSPROTO_H_
4688 struct vop_rmdir_args {
4689 struct vnode *a_dvp;
4690 struct vnode *a_vp;
4691 struct componentname *a_cnp;
4692 };
4693 #endif
4694
4695 static int
4696 zfs_freebsd_rmdir(struct vop_rmdir_args *ap)
4697 {
4698 struct componentname *cnp = ap->a_cnp;
4699
4700 #if __FreeBSD_version < 1400068
4701 ASSERT(cnp->cn_flags & SAVENAME);
4702 #endif
4703
4704 return (zfs_rmdir_(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
4705 }
4706
4707 #ifndef _SYS_SYSPROTO_H_
4708 struct vop_readdir_args {
4709 struct vnode *a_vp;
4710 struct uio *a_uio;
4711 struct ucred *a_cred;
4712 int *a_eofflag;
4713 int *a_ncookies;
4714 cookie_t **a_cookies;
4715 };
4716 #endif
4717
4718 static int
4719 zfs_freebsd_readdir(struct vop_readdir_args *ap)
4720 {
4721 zfs_uio_t uio;
4722 zfs_uio_init(&uio, ap->a_uio);
4723 return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
4724 ap->a_ncookies, ap->a_cookies));
4725 }
4726
4727 #ifndef _SYS_SYSPROTO_H_
4728 struct vop_fsync_args {
4729 struct vnode *a_vp;
4730 int a_waitfor;
4731 struct thread *a_td;
4732 };
4733 #endif
4734
4735 static int
4736 zfs_freebsd_fsync(struct vop_fsync_args *ap)
4737 {
4738
4739 return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
4740 }
4741
4742 #ifndef _SYS_SYSPROTO_H_
4743 struct vop_getattr_args {
4744 struct vnode *a_vp;
4745 struct vattr *a_vap;
4746 struct ucred *a_cred;
4747 };
4748 #endif
4749
4750 static int
4751 zfs_freebsd_getattr(struct vop_getattr_args *ap)
4752 {
4753 vattr_t *vap = ap->a_vap;
4754 xvattr_t xvap;
4755 ulong_t fflags = 0;
4756 int error;
4757
4758 xva_init(&xvap);
4759 xvap.xva_vattr = *vap;
4760 xvap.xva_vattr.va_mask |= AT_XVATTR;
4761
4762 /* Convert chflags into ZFS-type flags. */
4763 /* XXX: What about SF_SETTABLE? */
4764 XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
4765 XVA_SET_REQ(&xvap, XAT_APPENDONLY);
4766 XVA_SET_REQ(&xvap, XAT_NOUNLINK);
4767 XVA_SET_REQ(&xvap, XAT_NODUMP);
4768 XVA_SET_REQ(&xvap, XAT_READONLY);
4769 XVA_SET_REQ(&xvap, XAT_ARCHIVE);
4770 XVA_SET_REQ(&xvap, XAT_SYSTEM);
4771 XVA_SET_REQ(&xvap, XAT_HIDDEN);
4772 XVA_SET_REQ(&xvap, XAT_REPARSE);
4773 XVA_SET_REQ(&xvap, XAT_OFFLINE);
4774 XVA_SET_REQ(&xvap, XAT_SPARSE);
4775
4776 error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred);
4777 if (error != 0)
4778 return (error);
4779
4780 /* Convert ZFS xattr into chflags. */
4781 #define FLAG_CHECK(fflag, xflag, xfield) do { \
4782 if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
4783 fflags |= (fflag); \
4784 } while (0)
4785 FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
4786 xvap.xva_xoptattrs.xoa_immutable);
4787 FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
4788 xvap.xva_xoptattrs.xoa_appendonly);
4789 FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
4790 xvap.xva_xoptattrs.xoa_nounlink);
4791 FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
4792 xvap.xva_xoptattrs.xoa_archive);
4793 FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
4794 xvap.xva_xoptattrs.xoa_nodump);
4795 FLAG_CHECK(UF_READONLY, XAT_READONLY,
4796 xvap.xva_xoptattrs.xoa_readonly);
4797 FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
4798 xvap.xva_xoptattrs.xoa_system);
4799 FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
4800 xvap.xva_xoptattrs.xoa_hidden);
4801 FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
4802 xvap.xva_xoptattrs.xoa_reparse);
4803 FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
4804 xvap.xva_xoptattrs.xoa_offline);
4805 FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
4806 xvap.xva_xoptattrs.xoa_sparse);
4807
4808 #undef FLAG_CHECK
4809 *vap = xvap.xva_vattr;
4810 vap->va_flags = fflags;
4811 return (0);
4812 }
4813
4814 #ifndef _SYS_SYSPROTO_H_
4815 struct vop_setattr_args {
4816 struct vnode *a_vp;
4817 struct vattr *a_vap;
4818 struct ucred *a_cred;
4819 };
4820 #endif
4821
4822 static int
4823 zfs_freebsd_setattr(struct vop_setattr_args *ap)
4824 {
4825 vnode_t *vp = ap->a_vp;
4826 vattr_t *vap = ap->a_vap;
4827 cred_t *cred = ap->a_cred;
4828 xvattr_t xvap;
4829 ulong_t fflags;
4830 uint64_t zflags;
4831
4832 vattr_init_mask(vap);
4833 vap->va_mask &= ~AT_NOSET;
4834
4835 xva_init(&xvap);
4836 xvap.xva_vattr = *vap;
4837
4838 zflags = VTOZ(vp)->z_pflags;
4839
4840 if (vap->va_flags != VNOVAL) {
4841 zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
4842 int error;
4843
4844 if (zfsvfs->z_use_fuids == B_FALSE)
4845 return (EOPNOTSUPP);
4846
4847 fflags = vap->va_flags;
4848 /*
4849 * XXX KDM
4850 * We need to figure out whether it makes sense to allow
4851 * UF_REPARSE through, since we don't really have other
4852 * facilities to handle reparse points and zfs_setattr()
4853 * doesn't currently allow setting that attribute anyway.
4854 */
4855 if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
4856 UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
4857 UF_OFFLINE|UF_SPARSE)) != 0)
4858 return (EOPNOTSUPP);
4859 /*
4860 * Unprivileged processes are not permitted to unset system
4861 * flags, or modify flags if any system flags are set.
4862 * Privileged non-jail processes may not modify system flags
4863 * if securelevel > 0 and any existing system flags are set.
4864 * Privileged jail processes behave like privileged non-jail
4865 * processes if the PR_ALLOW_CHFLAGS permission bit is set;
4866 * otherwise, they behave like unprivileged processes.
4867 */
4868 if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
4869 spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
4870 if (zflags &
4871 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
4872 error = securelevel_gt(cred, 0);
4873 if (error != 0)
4874 return (error);
4875 }
4876 } else {
4877 /*
4878 * Callers may only modify the file flags on
4879 * objects they have VADMIN rights for.
4880 */
4881 if ((error = VOP_ACCESS(vp, VADMIN, cred,
4882 curthread)) != 0)
4883 return (error);
4884 if (zflags &
4885 (ZFS_IMMUTABLE | ZFS_APPENDONLY |
4886 ZFS_NOUNLINK)) {
4887 return (EPERM);
4888 }
4889 if (fflags &
4890 (SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
4891 return (EPERM);
4892 }
4893 }
4894
4895 #define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
4896 if (((fflags & (fflag)) && !(zflags & (zflag))) || \
4897 ((zflags & (zflag)) && !(fflags & (fflag)))) { \
4898 XVA_SET_REQ(&xvap, (xflag)); \
4899 (xfield) = ((fflags & (fflag)) != 0); \
4900 } \
4901 } while (0)
4902 /* Convert chflags into ZFS-type flags. */
4903 /* XXX: What about SF_SETTABLE? */
4904 FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
4905 xvap.xva_xoptattrs.xoa_immutable);
4906 FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
4907 xvap.xva_xoptattrs.xoa_appendonly);
4908 FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
4909 xvap.xva_xoptattrs.xoa_nounlink);
4910 FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
4911 xvap.xva_xoptattrs.xoa_archive);
4912 FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
4913 xvap.xva_xoptattrs.xoa_nodump);
4914 FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
4915 xvap.xva_xoptattrs.xoa_readonly);
4916 FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
4917 xvap.xva_xoptattrs.xoa_system);
4918 FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
4919 xvap.xva_xoptattrs.xoa_hidden);
4920 FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
4921 xvap.xva_xoptattrs.xoa_reparse);
4922 FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
4923 xvap.xva_xoptattrs.xoa_offline);
4924 FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
4925 xvap.xva_xoptattrs.xoa_sparse);
4926 #undef FLAG_CHANGE
4927 }
4928 if (vap->va_birthtime.tv_sec != VNOVAL) {
4929 xvap.xva_vattr.va_mask |= AT_XVATTR;
4930 XVA_SET_REQ(&xvap, XAT_CREATETIME);
4931 }
4932 return (zfs_setattr(VTOZ(vp), (vattr_t *)&xvap, 0, cred, NULL));
4933 }
4934
4935 #ifndef _SYS_SYSPROTO_H_
4936 struct vop_rename_args {
4937 struct vnode *a_fdvp;
4938 struct vnode *a_fvp;
4939 struct componentname *a_fcnp;
4940 struct vnode *a_tdvp;
4941 struct vnode *a_tvp;
4942 struct componentname *a_tcnp;
4943 };
4944 #endif
4945
4946 static int
4947 zfs_freebsd_rename(struct vop_rename_args *ap)
4948 {
4949 vnode_t *fdvp = ap->a_fdvp;
4950 vnode_t *fvp = ap->a_fvp;
4951 vnode_t *tdvp = ap->a_tdvp;
4952 vnode_t *tvp = ap->a_tvp;
4953 int error;
4954
4955 #if __FreeBSD_version < 1400068
4956 ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
4957 ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
4958 #endif
4959
4960 error = zfs_do_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
4961 ap->a_tcnp, ap->a_fcnp->cn_cred);
4962
4963 vrele(fdvp);
4964 vrele(fvp);
4965 vrele(tdvp);
4966 if (tvp != NULL)
4967 vrele(tvp);
4968
4969 return (error);
4970 }
4971
4972 #ifndef _SYS_SYSPROTO_H_
4973 struct vop_symlink_args {
4974 struct vnode *a_dvp;
4975 struct vnode **a_vpp;
4976 struct componentname *a_cnp;
4977 struct vattr *a_vap;
4978 char *a_target;
4979 };
4980 #endif
4981
4982 static int
4983 zfs_freebsd_symlink(struct vop_symlink_args *ap)
4984 {
4985 struct componentname *cnp = ap->a_cnp;
4986 vattr_t *vap = ap->a_vap;
4987 znode_t *zp = NULL;
4988 #if __FreeBSD_version >= 1300139
4989 char *symlink;
4990 size_t symlink_len;
4991 #endif
4992 int rc;
4993
4994 #if __FreeBSD_version < 1400068
4995 ASSERT(cnp->cn_flags & SAVENAME);
4996 #endif
4997
4998 vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
4999 vattr_init_mask(vap);
5000 *ap->a_vpp = NULL;
5001
5002 rc = zfs_symlink(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap,
5003 ap->a_target, &zp, cnp->cn_cred, 0 /* flags */, NULL);
5004 if (rc == 0) {
5005 *ap->a_vpp = ZTOV(zp);
5006 ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
5007 #if __FreeBSD_version >= 1300139
5008 MPASS(zp->z_cached_symlink == NULL);
5009 symlink_len = strlen(ap->a_target);
5010 symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
5011 if (symlink != NULL) {
5012 memcpy(symlink, ap->a_target, symlink_len);
5013 symlink[symlink_len] = '\0';
5014 atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
5015 (uintptr_t)symlink);
5016 }
5017 #endif
5018 }
5019 return (rc);
5020 }
5021
5022 #ifndef _SYS_SYSPROTO_H_
5023 struct vop_readlink_args {
5024 struct vnode *a_vp;
5025 struct uio *a_uio;
5026 struct ucred *a_cred;
5027 };
5028 #endif
5029
5030 static int
5031 zfs_freebsd_readlink(struct vop_readlink_args *ap)
5032 {
5033 zfs_uio_t uio;
5034 int error;
5035 #if __FreeBSD_version >= 1300139
5036 znode_t *zp = VTOZ(ap->a_vp);
5037 char *symlink, *base;
5038 size_t symlink_len;
5039 bool trycache;
5040 #endif
5041
5042 zfs_uio_init(&uio, ap->a_uio);
5043 #if __FreeBSD_version >= 1300139
5044 trycache = false;
5045 if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
5046 zfs_uio_iovcnt(&uio) == 1) {
5047 base = zfs_uio_iovbase(&uio, 0);
5048 symlink_len = zfs_uio_iovlen(&uio, 0);
5049 trycache = true;
5050 }
5051 #endif
5052 error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
5053 #if __FreeBSD_version >= 1300139
5054 if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
5055 error != 0 || !trycache) {
5056 return (error);
5057 }
5058 symlink_len -= zfs_uio_resid(&uio);
5059 symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
5060 if (symlink != NULL) {
5061 memcpy(symlink, base, symlink_len);
5062 symlink[symlink_len] = '\0';
5063 if (!atomic_cmpset_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
5064 (uintptr_t)NULL, (uintptr_t)symlink)) {
5065 cache_symlink_free(symlink, symlink_len + 1);
5066 }
5067 }
5068 #endif
5069 return (error);
5070 }
5071
5072 #ifndef _SYS_SYSPROTO_H_
5073 struct vop_link_args {
5074 struct vnode *a_tdvp;
5075 struct vnode *a_vp;
5076 struct componentname *a_cnp;
5077 };
5078 #endif
5079
5080 static int
5081 zfs_freebsd_link(struct vop_link_args *ap)
5082 {
5083 struct componentname *cnp = ap->a_cnp;
5084 vnode_t *vp = ap->a_vp;
5085 vnode_t *tdvp = ap->a_tdvp;
5086
5087 if (tdvp->v_mount != vp->v_mount)
5088 return (EXDEV);
5089
5090 #if __FreeBSD_version < 1400068
5091 ASSERT(cnp->cn_flags & SAVENAME);
5092 #endif
5093
5094 return (zfs_link(VTOZ(tdvp), VTOZ(vp),
5095 cnp->cn_nameptr, cnp->cn_cred, 0));
5096 }
5097
5098 #ifndef _SYS_SYSPROTO_H_
5099 struct vop_inactive_args {
5100 struct vnode *a_vp;
5101 struct thread *a_td;
5102 };
5103 #endif
5104
5105 static int
5106 zfs_freebsd_inactive(struct vop_inactive_args *ap)
5107 {
5108 vnode_t *vp = ap->a_vp;
5109
5110 #if __FreeBSD_version >= 1300123
5111 zfs_inactive(vp, curthread->td_ucred, NULL);
5112 #else
5113 zfs_inactive(vp, ap->a_td->td_ucred, NULL);
5114 #endif
5115 return (0);
5116 }
5117
5118 #if __FreeBSD_version >= 1300042
5119 #ifndef _SYS_SYSPROTO_H_
5120 struct vop_need_inactive_args {
5121 struct vnode *a_vp;
5122 struct thread *a_td;
5123 };
5124 #endif
5125
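/*
 * Decide cheaply whether VOP_INACTIVE is needed: only when dirty pages must
 * be flushed, the znode is already torn down or unlinked, or its atime is
 * dirty.  If the teardown lock cannot be acquired, err on the side of
 * running the full inactive path.
 */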
5126 static int
5127 zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
5128 {
5129 vnode_t *vp = ap->a_vp;
5130 znode_t *zp = VTOZ(vp);
5131 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5132 int need;
5133
5134 if (vn_need_pageq_flush(vp))
5135 return (1);
5136
5137 if (!ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs))
5138 return (1);
5139 need = (zp->z_sa_hdl == NULL || zp->z_unlinked || zp->z_atime_dirty);
5140 ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
5141
5142 return (need);
5143 }
5144 #endif
5145
5146 #ifndef _SYS_SYSPROTO_H_
5147 struct vop_reclaim_args {
5148 struct vnode *a_vp;
5149 struct thread *a_td;
5150 };
5151 #endif
5152
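/*
 * VOP_RECLAIM: detach the znode from the vnode, freeing it immediately if
 * its SA handle is already gone and marking it inactive otherwise.
 */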
5153 static int
5154 zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
5155 {
5156 vnode_t *vp = ap->a_vp;
5157 znode_t *zp = VTOZ(vp);
5158 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5159
5160 ASSERT3P(zp, !=, NULL);
5161
5162 #if __FreeBSD_version < 1300042
5163 /* Destroy the vm object and flush associated pages. */
5164 vnode_destroy_vobject(vp);
5165 #endif
5166 /*
5167 * z_teardown_inactive_lock protects from a race with
5168 * zfs_znode_dmu_fini in zfsvfs_teardown during
5169 * force unmount.
5170 */
5171 ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
5172 if (zp->z_sa_hdl == NULL)
5173 zfs_znode_free(zp);
5174 else
5175 zfs_zinactive(zp);
5176 ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
5177
5178 vp->v_data = NULL;
5179 return (0);
5180 }
5181
5182 #ifndef _SYS_SYSPROTO_H_
5183 struct vop_fid_args {
5184 struct vnode *a_vp;
5185 struct fid *a_fid;
5186 };
5187 #endif
5188
5189 static int
5190 zfs_freebsd_fid(struct vop_fid_args *ap)
5191 {
5192
5193 return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
5194 }
5195
5196
5197 #ifndef _SYS_SYSPROTO_H_
5198 struct vop_pathconf_args {
5199 struct vnode *a_vp;
5200 int a_name;
5201 register_t *a_retval;
5202 } *ap;
5203 #endif
5204
5205 static int
5206 zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
5207 {
5208 ulong_t val;
5209 int error;
5210
5211 error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
5212 curthread->td_ucred, NULL);
5213 if (error == 0) {
5214 *ap->a_retval = val;
5215 return (error);
5216 }
5217 if (error != EOPNOTSUPP)
5218 return (error);
5219
5220 switch (ap->a_name) {
5221 case _PC_NAME_MAX:
5222 *ap->a_retval = NAME_MAX;
5223 return (0);
5224 #if __FreeBSD_version >= 1400032
5225 case _PC_DEALLOC_PRESENT:
5226 *ap->a_retval = 1;
5227 return (0);
5228 #endif
5229 case _PC_PIPE_BUF:
5230 if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
5231 *ap->a_retval = PIPE_BUF;
5232 return (0);
5233 }
5234 return (EINVAL);
5235 default:
5236 return (vop_stdpathconf(ap));
5237 }
5238 }
5239
5240 static int zfs_xattr_compat = 1;
5241
5242 static int
5243 zfs_check_attrname(const char *name)
5244 {
5245 /* We don't allow the '/' character in attribute names. */
5246 if (strchr(name, '/') != NULL)
5247 return (SET_ERROR(EINVAL));
5248 /* We don't allow attribute names that start with a namespace prefix. */
5249 if (ZFS_XA_NS_PREFIX_FORBIDDEN(name))
5250 return (SET_ERROR(EINVAL));
5251 return (0);
5252 }
5253
5254 /*
5255 * FreeBSD's extended attribute namespace determines the file name prefix for
5256 * a ZFS extended attribute name:
5257 *
5258 * NAMESPACE XATTR_COMPAT PREFIX
5259 * system * freebsd:system:
5260 * user 1 (none, can be used to access ZFS
5261 * fsattr(5) attributes created on Solaris)
5262 * user 0 user.
5263 */
5264 static int
5265 zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
5266 size_t size, boolean_t compat)
5267 {
5268 const char *namespace, *prefix, *suffix;
5269
5270 memset(attrname, 0, size);
5271
5272 switch (attrnamespace) {
5273 case EXTATTR_NAMESPACE_USER:
5274 if (compat) {
5275 /*
5276 * This is the default namespace by which we can access
5277 * all attributes created on Solaris.
5278 */
5279 prefix = namespace = suffix = "";
5280 } else {
5281 /*
5282 * This is compatible with the user namespace encoding
5283 * on Linux prior to xattr_compat, but nothing
5284 * else.
5285 */
5286 prefix = "";
5287 namespace = "user";
5288 suffix = ".";
5289 }
5290 break;
5291 case EXTATTR_NAMESPACE_SYSTEM:
5292 prefix = "freebsd:";
5293 namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
5294 suffix = ":";
5295 break;
5296 case EXTATTR_NAMESPACE_EMPTY:
5297 default:
5298 return (SET_ERROR(EINVAL));
5299 }
5300 if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
5301 name) >= size) {
5302 return (SET_ERROR(ENAMETOOLONG));
5303 }
5304 return (0);
5305 }
5306
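/*
 * Make sure the SA xattr nvlist is loaded into z_xattr_cached, upgrading
 * the xattr lock to a writer if necessary and downgrading it again before
 * returning.
 */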
5307 static int
5308 zfs_ensure_xattr_cached(znode_t *zp)
5309 {
5310 int error = 0;
5311
5312 ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
5313
5314 if (zp->z_xattr_cached != NULL)
5315 return (0);
5316
5317 if (rw_write_held(&zp->z_xattr_lock))
5318 return (zfs_sa_get_xattr(zp));
5319
5320 if (!rw_tryupgrade(&zp->z_xattr_lock)) {
5321 rw_exit(&zp->z_xattr_lock);
5322 rw_enter(&zp->z_xattr_lock, RW_WRITER);
5323 }
5324 if (zp->z_xattr_cached == NULL)
5325 error = zfs_sa_get_xattr(zp);
5326 rw_downgrade(&zp->z_xattr_lock);
5327 return (error);
5328 }
5329
5330 #ifndef _SYS_SYSPROTO_H_
5331 struct vop_getextattr {
5332 IN struct vnode *a_vp;
5333 IN int a_attrnamespace;
5334 IN const char *a_name;
5335 INOUT struct uio *a_uio;
5336 OUT size_t *a_size;
5337 IN struct ucred *a_cred;
5338 IN struct thread *a_td;
5339 };
5340 #endif
5341
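/*
 * Read an extended attribute stored as a file in the hidden xattr
 * directory: open it by name and either return its size or copy its
 * contents into the caller's uio.
 */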
5342 static int
5343 zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
5344 {
5345 struct thread *td = ap->a_td;
5346 struct nameidata nd;
5347 struct vattr va;
5348 vnode_t *xvp = NULL, *vp;
5349 int error, flags;
5350
5351 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
5352 LOOKUP_XATTR, B_FALSE);
5353 if (error != 0)
5354 return (error);
5355
5356 flags = FREAD;
5357 #if __FreeBSD_version < 1400043
5358 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
5359 xvp, td);
5360 #else
5361 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
5362 #endif
5363 error = vn_open_cred(&nd, &flags, 0, VN_OPEN_INVFS, ap->a_cred, NULL);
5364 if (error != 0)
5365 return (SET_ERROR(error));
5366 vp = nd.ni_vp;
5367 NDFREE_PNBUF(&nd);
5368
5369 if (ap->a_size != NULL) {
5370 error = VOP_GETATTR(vp, &va, ap->a_cred);
5371 if (error == 0)
5372 *ap->a_size = (size_t)va.va_size;
5373 } else if (ap->a_uio != NULL)
5374 error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
5375
5376 VOP_UNLOCK1(vp);
5377 vn_close(vp, flags, ap->a_cred, td);
5378 return (error);
5379 }
5380
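/*
 * Read an extended attribute stored in the znode's system-attribute (SA)
 * nvlist cache.
 */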
5381 static int
5382 zfs_getextattr_sa(struct vop_getextattr_args *ap, const char *attrname)
5383 {
5384 znode_t *zp = VTOZ(ap->a_vp);
5385 uchar_t *nv_value;
5386 uint_t nv_size;
5387 int error;
5388
5389 error = zfs_ensure_xattr_cached(zp);
5390 if (error != 0)
5391 return (error);
5392
5393 ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
5394 ASSERT3P(zp->z_xattr_cached, !=, NULL);
5395
5396 error = nvlist_lookup_byte_array(zp->z_xattr_cached, attrname,
5397 &nv_value, &nv_size);
5398 if (error != 0)
5399 return (SET_ERROR(error));
5400
5401 if (ap->a_size != NULL)
5402 *ap->a_size = nv_size;
5403 else if (ap->a_uio != NULL)
5404 error = uiomove(nv_value, nv_size, ap->a_uio);
5405 if (error != 0)
5406 return (SET_ERROR(error));
5407
5408 return (0);
5409 }
5410
5411 static int
5412 zfs_getextattr_impl(struct vop_getextattr_args *ap, boolean_t compat)
5413 {
5414 znode_t *zp = VTOZ(ap->a_vp);
5415 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5416 char attrname[EXTATTR_MAXNAMELEN+1];
5417 int error;
5418
5419 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
5420 sizeof (attrname), compat);
5421 if (error != 0)
5422 return (error);
5423
5424 error = ENOENT;
5425 if (zfsvfs->z_use_sa && zp->z_is_sa)
5426 error = zfs_getextattr_sa(ap, attrname);
5427 if (error == ENOENT)
5428 error = zfs_getextattr_dir(ap, attrname);
5429 return (error);
5430 }
5431
5432 /*
5433 * Vnode operation to retrieve a named extended attribute.
5434 */
5435 static int
5436 zfs_getextattr(struct vop_getextattr_args *ap)
5437 {
5438 znode_t *zp = VTOZ(ap->a_vp);
5439 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5440 int error;
5441
5442 /*
5443 * If the xattr property is off, refuse the request.
5444 */
5445 if (!(zfsvfs->z_flags & ZSB_XATTR))
5446 return (SET_ERROR(EOPNOTSUPP));
5447
5448 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5449 ap->a_cred, ap->a_td, VREAD);
5450 if (error != 0)
5451 return (SET_ERROR(error));
5452
5453 error = zfs_check_attrname(ap->a_name);
5454 if (error != 0)
5455 return (error);
5456
5457 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
5458 return (error);
5459 error = ENOENT;
5460 rw_enter(&zp->z_xattr_lock, RW_READER);
5461
5462 error = zfs_getextattr_impl(ap, zfs_xattr_compat);
5463 if ((error == ENOENT || error == ENOATTR) &&
5464 ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
5465 /*
5466 * Fall back to the alternate namespace format if we failed to
5467 * find a user xattr.
5468 */
5469 error = zfs_getextattr_impl(ap, !zfs_xattr_compat);
5470 }
5471
5472 rw_exit(&zp->z_xattr_lock);
5473 zfs_exit(zfsvfs, FTAG);
5474 if (error == ENOENT)
5475 error = SET_ERROR(ENOATTR);
5476 return (error);
5477 }
5478
5479 #ifndef _SYS_SYSPROTO_H_
5480 struct vop_deleteextattr {
5481 IN struct vnode *a_vp;
5482 IN int a_attrnamespace;
5483 IN const char *a_name;
5484 IN struct ucred *a_cred;
5485 IN struct thread *a_td;
5486 };
5487 #endif
5488
5489 static int
5490 zfs_deleteextattr_dir(struct vop_deleteextattr_args *ap, const char *attrname)
5491 {
5492 struct nameidata nd;
5493 vnode_t *xvp = NULL, *vp;
5494 int error;
5495
5496 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
5497 LOOKUP_XATTR, B_FALSE);
5498 if (error != 0)
5499 return (error);
5500
5501 #if __FreeBSD_version < 1400043
5502 NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
5503 UIO_SYSSPACE, attrname, xvp, ap->a_td);
5504 #else
5505 NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
5506 UIO_SYSSPACE, attrname, xvp);
5507 #endif
5508 error = namei(&nd);
5509 if (error != 0)
5510 return (SET_ERROR(error));
5511
5512 vp = nd.ni_vp;
5513 error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
5514 NDFREE_PNBUF(&nd);
5515
5516 vput(nd.ni_dvp);
5517 if (vp == nd.ni_dvp)
5518 vrele(vp);
5519 else
5520 vput(vp);
5521
5522 return (error);
5523 }
5524
5525 static int
5526 zfs_deleteextattr_sa(struct vop_deleteextattr_args *ap, const char *attrname)
5527 {
5528 znode_t *zp = VTOZ(ap->a_vp);
5529 nvlist_t *nvl;
5530 int error;
5531
5532 error = zfs_ensure_xattr_cached(zp);
5533 if (error != 0)
5534 return (error);
5535
5536 ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
5537 ASSERT3P(zp->z_xattr_cached, !=, NULL);
5538
5539 nvl = zp->z_xattr_cached;
5540 error = nvlist_remove(nvl, attrname, DATA_TYPE_BYTE_ARRAY);
5541 if (error != 0)
5542 error = SET_ERROR(error);
5543 else
5544 error = zfs_sa_set_xattr(zp, attrname, NULL, 0);
5545 if (error != 0) {
5546 zp->z_xattr_cached = NULL;
5547 nvlist_free(nvl);
5548 }
5549 return (error);
5550 }
5551
5552 static int
5553 zfs_deleteextattr_impl(struct vop_deleteextattr_args *ap, boolean_t compat)
5554 {
5555 znode_t *zp = VTOZ(ap->a_vp);
5556 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5557 char attrname[EXTATTR_MAXNAMELEN+1];
5558 int error;
5559
5560 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
5561 sizeof (attrname), compat);
5562 if (error != 0)
5563 return (error);
5564
5565 error = ENOENT;
5566 if (zfsvfs->z_use_sa && zp->z_is_sa)
5567 error = zfs_deleteextattr_sa(ap, attrname);
5568 if (error == ENOENT)
5569 error = zfs_deleteextattr_dir(ap, attrname);
5570 return (error);
5571 }
5572
5573 /*
5574 * Vnode operation to remove a named attribute.
5575 */
5576 static int
5577 zfs_deleteextattr(struct vop_deleteextattr_args *ap)
5578 {
5579 znode_t *zp = VTOZ(ap->a_vp);
5580 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5581 int error;
5582
5583 /*
5584 * If the xattr property is off, refuse the request.
5585 */
5586 if (!(zfsvfs->z_flags & ZSB_XATTR))
5587 return (SET_ERROR(EOPNOTSUPP));
5588
5589 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5590 ap->a_cred, ap->a_td, VWRITE);
5591 if (error != 0)
5592 return (SET_ERROR(error));
5593
5594 error = zfs_check_attrname(ap->a_name);
5595 if (error != 0)
5596 return (error);
5597
5598 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
5599 return (error);
5600 rw_enter(&zp->z_xattr_lock, RW_WRITER);
5601
5602 error = zfs_deleteextattr_impl(ap, zfs_xattr_compat);
5603 if ((error == ENOENT || error == ENOATTR) &&
5604 ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
5605 /*
5606 * Fall back to the alternate namespace format if we failed to
5607 * find a user xattr.
5608 */
5609 error = zfs_deleteextattr_impl(ap, !zfs_xattr_compat);
5610 }
5611
5612 rw_exit(&zp->z_xattr_lock);
5613 zfs_exit(zfsvfs, FTAG);
5614 if (error == ENOENT)
5615 error = SET_ERROR(ENOATTR);
5616 return (error);
5617 }
5618
5619 #ifndef _SYS_SYSPROTO_H_
5620 struct vop_setextattr {
5621 IN struct vnode *a_vp;
5622 IN int a_attrnamespace;
5623 IN const char *a_name;
5624 INOUT struct uio *a_uio;
5625 IN struct ucred *a_cred;
5626 IN struct thread *a_td;
5627 };
5628 #endif
5629
5630 static int
5631 zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
5632 {
5633 struct thread *td = ap->a_td;
5634 struct nameidata nd;
5635 struct vattr va;
5636 vnode_t *xvp = NULL, *vp;
5637 int error, flags;
5638
5639 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
5640 LOOKUP_XATTR | CREATE_XATTR_DIR, B_FALSE);
5641 if (error != 0)
5642 return (error);
5643
5644 flags = FFLAGS(O_WRONLY | O_CREAT);
5645 #if __FreeBSD_version < 1400043
5646 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp, td);
5647 #else
5648 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
5649 #endif
5650 error = vn_open_cred(&nd, &flags, 0600, VN_OPEN_INVFS, ap->a_cred,
5651 NULL);
5652 if (error != 0)
5653 return (SET_ERROR(error));
5654 vp = nd.ni_vp;
5655 NDFREE_PNBUF(&nd);
5656
5657 VATTR_NULL(&va);
5658 va.va_size = 0;
5659 error = VOP_SETATTR(vp, &va, ap->a_cred);
5660 if (error == 0)
5661 error = VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
5662
5663 VOP_UNLOCK1(vp);
5664 vn_close(vp, flags, ap->a_cred, td);
5665 return (error);
5666 }
5667
5668 static int
5669 zfs_setextattr_sa(struct vop_setextattr_args *ap, const char *attrname)
5670 {
5671 znode_t *zp = VTOZ(ap->a_vp);
5672 nvlist_t *nvl;
5673 size_t sa_size;
5674 int error;
5675
5676 error = zfs_ensure_xattr_cached(zp);
5677 if (error != 0)
5678 return (error);
5679
5680 ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
5681 ASSERT3P(zp->z_xattr_cached, !=, NULL);
5682
5683 nvl = zp->z_xattr_cached;
5684 size_t entry_size = ap->a_uio->uio_resid;
5685 if (entry_size > DXATTR_MAX_ENTRY_SIZE)
5686 return (SET_ERROR(EFBIG));
5687 error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
5688 if (error != 0)
5689 return (SET_ERROR(error));
5690 if (sa_size > DXATTR_MAX_SA_SIZE)
5691 return (SET_ERROR(EFBIG));
5692 uchar_t *buf = kmem_alloc(entry_size, KM_SLEEP);
5693 error = uiomove(buf, entry_size, ap->a_uio);
5694 if (error != 0) {
5695 error = SET_ERROR(error);
5696 } else {
5697 error = nvlist_add_byte_array(nvl, attrname, buf, entry_size);
5698 if (error != 0)
5699 error = SET_ERROR(error);
5700 }
5701 if (error == 0)
5702 error = zfs_sa_set_xattr(zp, attrname, buf, entry_size);
5703 kmem_free(buf, entry_size);
5704 if (error != 0) {
5705 zp->z_xattr_cached = NULL;
5706 nvlist_free(nvl);
5707 }
5708 return (error);
5709 }
5710
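/*
 * Store the attribute in the SA nvlist when the dataset allows it, falling
 * back to the xattr directory if SA storage is unavailable or fails; on
 * success, remove any stale copy from the other location and, for user
 * attributes, from the alternate compat name.
 */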
5711 static int
5712 zfs_setextattr_impl(struct vop_setextattr_args *ap, boolean_t compat)
5713 {
5714 znode_t *zp = VTOZ(ap->a_vp);
5715 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5716 char attrname[EXTATTR_MAXNAMELEN+1];
5717 int error;
5718
5719 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
5720 sizeof (attrname), compat);
5721 if (error != 0)
5722 return (error);
5723
5724 struct vop_deleteextattr_args vda = {
5725 .a_vp = ap->a_vp,
5726 .a_attrnamespace = ap->a_attrnamespace,
5727 .a_name = ap->a_name,
5728 .a_cred = ap->a_cred,
5729 .a_td = ap->a_td,
5730 };
5731 error = ENOENT;
5732 if (zfsvfs->z_use_sa && zp->z_is_sa && zfsvfs->z_xattr_sa) {
5733 error = zfs_setextattr_sa(ap, attrname);
5734 if (error == 0) {
5735 /*
5736 * Successfully put into the SA; clear any copy that may
5737 * still exist in the xattr dir.
5738 */
5739 zfs_deleteextattr_dir(&vda, attrname);
5740 }
5741 }
5742 if (error != 0) {
5743 error = zfs_setextattr_dir(ap, attrname);
5744 if (error == 0 && zp->z_is_sa) {
5745 /*
5746 * Successfully put into the xattr dir; clear any copy
5747 * that may still exist in the SA.
5748 */
5749 zfs_deleteextattr_sa(&vda, attrname);
5750 }
5751 }
5752 if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
5753 /*
5754 * Also clear all versions of the alternate compat name.
5755 */
5756 zfs_deleteextattr_impl(&vda, !compat);
5757 }
5758 return (error);
5759 }
5760
5761 /*
5762 * Vnode operation to set a named attribute.
5763 */
5764 static int
5765 zfs_setextattr(struct vop_setextattr_args *ap)
5766 {
5767 znode_t *zp = VTOZ(ap->a_vp);
5768 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5769 int error;
5770
5771 /*
5772 * If the xattr property is off, refuse the request.
5773 */
5774 if (!(zfsvfs->z_flags & ZSB_XATTR))
5775 return (SET_ERROR(EOPNOTSUPP));
5776
5777 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5778 ap->a_cred, ap->a_td, VWRITE);
5779 if (error != 0)
5780 return (SET_ERROR(error));
5781
5782 error = zfs_check_attrname(ap->a_name);
5783 if (error != 0)
5784 return (error);
5785
5786 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
5787 return (error);
5788 rw_enter(&zp->z_xattr_lock, RW_WRITER);
5789
5790 error = zfs_setextattr_impl(ap, zfs_xattr_compat);
5791
5792 rw_exit(&zp->z_xattr_lock);
5793 zfs_exit(zfsvfs, FTAG);
5794 return (error);
5795 }
5796
5797 #ifndef _SYS_SYSPROTO_H_
5798 struct vop_listextattr {
5799 IN struct vnode *a_vp;
5800 IN int a_attrnamespace;
5801 INOUT struct uio *a_uio;
5802 OUT size_t *a_size;
5803 IN struct ucred *a_cred;
5804 IN struct thread *a_td;
5805 };
5806 #endif
5807
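/*
 * List extended attributes stored in the hidden xattr directory by reading
 * it with VOP_READDIR and emitting each matching name in the FreeBSD
 * length-prefixed extattr list format.
 */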
5808 static int
5809 zfs_listextattr_dir(struct vop_listextattr_args *ap, const char *attrprefix)
5810 {
5811 struct thread *td = ap->a_td;
5812 struct nameidata nd;
5813 uint8_t dirbuf[sizeof (struct dirent)];
5814 struct iovec aiov;
5815 struct uio auio;
5816 vnode_t *xvp = NULL, *vp;
5817 int error, eof;
5818
5819 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
5820 LOOKUP_XATTR, B_FALSE);
5821 if (error != 0) {
5822 /*
5823 * ENOATTR means that the EA directory does not yet exist,
5824 * i.e. there are no extended attributes there.
5825 */
5826 if (error == ENOATTR)
5827 error = 0;
5828 return (error);
5829 }
5830
5831 #if __FreeBSD_version < 1400043
5832 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
5833 UIO_SYSSPACE, ".", xvp, td);
5834 #else
5835 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
5836 UIO_SYSSPACE, ".", xvp);
5837 #endif
5838 error = namei(&nd);
5839 if (error != 0)
5840 return (SET_ERROR(error));
5841 vp = nd.ni_vp;
5842 NDFREE_PNBUF(&nd);
5843
5844 auio.uio_iov = &aiov;
5845 auio.uio_iovcnt = 1;
5846 auio.uio_segflg = UIO_SYSSPACE;
5847 auio.uio_td = td;
5848 auio.uio_rw = UIO_READ;
5849 auio.uio_offset = 0;
5850
5851 size_t plen = strlen(attrprefix);
5852
5853 do {
5854 aiov.iov_base = (void *)dirbuf;
5855 aiov.iov_len = sizeof (dirbuf);
5856 auio.uio_resid = sizeof (dirbuf);
5857 error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
5858 if (error != 0)
5859 break;
5860 int done = sizeof (dirbuf) - auio.uio_resid;
5861 for (int pos = 0; pos < done; ) {
5862 struct dirent *dp = (struct dirent *)(dirbuf + pos);
5863 pos += dp->d_reclen;
5864 /*
5865 * XXX: Temporarily we also accept DT_UNKNOWN, as this
5866 * is what we get when an attribute was created on Solaris.
5867 */
5868 if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
5869 continue;
5870 else if (plen == 0 &&
5871 ZFS_XA_NS_PREFIX_FORBIDDEN(dp->d_name))
5872 continue;
5873 else if (strncmp(dp->d_name, attrprefix, plen) != 0)
5874 continue;
5875 uint8_t nlen = dp->d_namlen - plen;
5876 if (ap->a_size != NULL) {
5877 *ap->a_size += 1 + nlen;
5878 } else if (ap->a_uio != NULL) {
5879 /*
5880 * Format of extattr name entry is one byte for
5881 * length and the rest for name.
5882 */
5883 error = uiomove(&nlen, 1, ap->a_uio);
5884 if (error == 0) {
5885 char *namep = dp->d_name + plen;
5886 error = uiomove(namep, nlen, ap->a_uio);
5887 }
5888 if (error != 0) {
5889 error = SET_ERROR(error);
5890 break;
5891 }
5892 }
5893 }
5894 } while (!eof && error == 0);
5895
5896 vput(vp);
5897 return (error);
5898 }
5899
5900 static int
5901 zfs_listextattr_sa(struct vop_listextattr_args *ap, const char *attrprefix)
5902 {
5903 znode_t *zp = VTOZ(ap->a_vp);
5904 int error;
5905
5906 error = zfs_ensure_xattr_cached(zp);
5907 if (error != 0)
5908 return (error);
5909
5910 ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
5911 ASSERT3P(zp->z_xattr_cached, !=, NULL);
5912
5913 size_t plen = strlen(attrprefix);
5914 nvpair_t *nvp = NULL;
5915 while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
5916 ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
5917
5918 const char *name = nvpair_name(nvp);
5919 if (plen == 0 && ZFS_XA_NS_PREFIX_FORBIDDEN(name))
5920 continue;
5921 else if (strncmp(name, attrprefix, plen) != 0)
5922 continue;
5923 uint8_t nlen = strlen(name) - plen;
5924 if (ap->a_size != NULL) {
5925 *ap->a_size += 1 + nlen;
5926 } else if (ap->a_uio != NULL) {
5927 /*
5928 * Format of extattr name entry is one byte for
5929 * length and the rest for name.
5930 */
5931 error = uiomove(&nlen, 1, ap->a_uio);
5932 if (error == 0) {
5933 char *namep = __DECONST(char *, name) + plen;
5934 error = uiomove(namep, nlen, ap->a_uio);
5935 }
5936 if (error != 0) {
5937 error = SET_ERROR(error);
5938 break;
5939 }
5940 }
5941 }
5942
5943 return (error);
5944 }
5945
5946 static int
5947 zfs_listextattr_impl(struct vop_listextattr_args *ap, boolean_t compat)
5948 {
5949 znode_t *zp = VTOZ(ap->a_vp);
5950 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5951 char attrprefix[16];
5952 int error;
5953
5954 error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
5955 sizeof (attrprefix), compat);
5956 if (error != 0)
5957 return (error);
5958
5959 if (zfsvfs->z_use_sa && zp->z_is_sa)
5960 error = zfs_listextattr_sa(ap, attrprefix);
5961 if (error == 0)
5962 error = zfs_listextattr_dir(ap, attrprefix);
5963 return (error);
5964 }
5965
5966 /*
5967 * Vnode operation to retrieve extended attributes on a vnode.
5968 */
5969 static int
5970 zfs_listextattr(struct vop_listextattr_args *ap)
5971 {
5972 znode_t *zp = VTOZ(ap->a_vp);
5973 zfsvfs_t *zfsvfs = ZTOZSB(zp);
5974 int error;
5975
5976 if (ap->a_size != NULL)
5977 *ap->a_size = 0;
5978
5979 /*
5980 * If the xattr property is off, refuse the request.
5981 */
5982 if (!(zfsvfs->z_flags & ZSB_XATTR))
5983 return (SET_ERROR(EOPNOTSUPP));
5984
5985 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5986 ap->a_cred, ap->a_td, VREAD);
5987 if (error != 0)
5988 return (SET_ERROR(error));
5989
5990 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
5991 return (error);
5992 rw_enter(&zp->z_xattr_lock, RW_READER);
5993
5994 error = zfs_listextattr_impl(ap, zfs_xattr_compat);
5995 if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
5996 /* Also list user xattrs with the alternate format. */
5997 error = zfs_listextattr_impl(ap, !zfs_xattr_compat);
5998 }
5999
6000 rw_exit(&zp->z_xattr_lock);
6001 zfs_exit(zfsvfs, FTAG);
6002 return (error);
6003 }
6004
6005 #ifndef _SYS_SYSPROTO_H_
6006 struct vop_getacl_args {
6007 struct vnode *vp;
6008 acl_type_t type;
6009 struct acl *aclp;
6010 struct ucred *cred;
6011 struct thread *td;
6012 };
6013 #endif
6014
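/*
 * VOP_GETACL: fetch the NFSv4 ACL as ZFS ACEs and convert it into the
 * caller's struct acl.
 */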
6015 static int
6016 zfs_freebsd_getacl(struct vop_getacl_args *ap)
6017 {
6018 int error;
6019 vsecattr_t vsecattr;
6020
6021 if (ap->a_type != ACL_TYPE_NFS4)
6022 return (EINVAL);
6023
6024 vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
6025 if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
6026 &vsecattr, 0, ap->a_cred)))
6027 return (error);
6028
6029 error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
6030 vsecattr.vsa_aclcnt);
6031 if (vsecattr.vsa_aclentp != NULL)
6032 kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
6033
6034 return (error);
6035 }
6036
6037 #ifndef _SYS_SYSPROTO_H_
6038 struct vop_setacl_args {
6039 struct vnode *vp;
6040 acl_type_t type;
6041 struct acl *aclp;
6042 struct ucred *cred;
6043 struct thread *td;
6044 };
6045 #endif
6046
6047 static int
6048 zfs_freebsd_setacl(struct vop_setacl_args *ap)
6049 {
6050 int error;
6051 vsecattr_t vsecattr;
6052 int aclbsize; /* size of acl list in bytes */
6053 aclent_t *aaclp;
6054
6055 if (ap->a_type != ACL_TYPE_NFS4)
6056 return (EINVAL);
6057
6058 if (ap->a_aclp == NULL)
6059 return (EINVAL);
6060
6061 if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
6062 return (EINVAL);
6063
6064 /*
6065 * With NFSv4 ACLs, chmod(2) may need to add additional entries,
6066 * splitting every entry into two and appending "canonical six"
6067 * entries at the end. Don't allow setting an ACL that would
6068 * cause chmod(2) to run out of ACL entries.
6069 */
6070 if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
6071 return (ENOSPC);
6072
6073 error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
6074 if (error != 0)
6075 return (error);
6076
6077 vsecattr.vsa_mask = VSA_ACE;
6078 aclbsize = ap->a_aclp->acl_cnt * sizeof (ace_t);
6079 vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
6080 aaclp = vsecattr.vsa_aclentp;
6081 vsecattr.vsa_aclentsz = aclbsize;
6082
6083 aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
6084 error = zfs_setsecattr(VTOZ(ap->a_vp), &vsecattr, 0, ap->a_cred);
6085 kmem_free(aaclp, aclbsize);
6086
6087 return (error);
6088 }
6089
6090 #ifndef _SYS_SYSPROTO_H_
6091 struct vop_aclcheck_args {
6092 struct vnode *vp;
6093 acl_type_t type;
6094 struct acl *aclp;
6095 struct ucred *cred;
6096 struct thread *td;
6097 };
6098 #endif
6099
6100 static int
6101 zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
6102 {
6103
6104 return (EOPNOTSUPP);
6105 }
6106
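/*
 * VOP_VPTOCNP: report this vnode's name and parent.  For the root of a
 * snapshot mounted under .zfs, delegate to the covered vnode instead.
 */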
6107 static int
6108 zfs_vptocnp(struct vop_vptocnp_args *ap)
6109 {
6110 vnode_t *covered_vp;
6111 vnode_t *vp = ap->a_vp;
6112 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
6113 znode_t *zp = VTOZ(vp);
6114 int ltype;
6115 int error;
6116
6117 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
6118 return (error);
6119
6120 /*
6121 * If we are a snapshot mounted under .zfs, run the operation
6122 * on the covered vnode.
6123 */
6124 if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
6125 char name[MAXNAMLEN + 1];
6126 znode_t *dzp;
6127 size_t len;
6128
6129 error = zfs_znode_parent_and_name(zp, &dzp, name);
6130 if (error == 0) {
6131 len = strlen(name);
6132 if (*ap->a_buflen < len)
6133 error = SET_ERROR(ENOMEM);
6134 }
6135 if (error == 0) {
6136 *ap->a_buflen -= len;
6137 memcpy(ap->a_buf + *ap->a_buflen, name, len);
6138 *ap->a_vpp = ZTOV(dzp);
6139 }
6140 zfs_exit(zfsvfs, FTAG);
6141 return (error);
6142 }
6143 zfs_exit(zfsvfs, FTAG);
6144
6145 covered_vp = vp->v_mount->mnt_vnodecovered;
6146 #if __FreeBSD_version >= 1300045
6147 enum vgetstate vs = vget_prep(covered_vp);
6148 #else
6149 vhold(covered_vp);
6150 #endif
6151 ltype = VOP_ISLOCKED(vp);
6152 VOP_UNLOCK1(vp);
6153 #if __FreeBSD_version >= 1300045
6154 error = vget_finish(covered_vp, LK_SHARED, vs);
6155 #else
6156 error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
6157 #endif
6158 if (error == 0) {
6159 #if __FreeBSD_version >= 1300123
6160 error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
6161 ap->a_buflen);
6162 #else
6163 error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
6164 ap->a_buf, ap->a_buflen);
6165 #endif
6166 vput(covered_vp);
6167 }
6168 vn_lock(vp, ltype | LK_RETRY);
6169 if (VN_IS_DOOMED(vp))
6170 error = SET_ERROR(ENOENT);
6171 return (error);
6172 }
6173
6174 #if __FreeBSD_version >= 1400032
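/*
 * VOP_DEALLOCATE: punch a hole in the file via zfs_freesp() and commit the
 * ZIL when the dataset or the caller requires synchronous semantics.
 */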
6175 static int
6176 zfs_deallocate(struct vop_deallocate_args *ap)
6177 {
6178 znode_t *zp = VTOZ(ap->a_vp);
6179 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
6180 zilog_t *zilog;
6181 off_t off, len, file_sz;
6182 int error;
6183
6184 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
6185 return (error);
6186
6187 /*
6188 * Callers might not be able to detect properly that we are read-only,
6189 * so check it explicitly here.
6190 */
6191 if (zfs_is_readonly(zfsvfs)) {
6192 zfs_exit(zfsvfs, FTAG);
6193 return (SET_ERROR(EROFS));
6194 }
6195
6196 zilog = zfsvfs->z_log;
6197 off = *ap->a_offset;
6198 len = *ap->a_len;
6199 file_sz = zp->z_size;
6200 if (off + len > file_sz)
6201 len = file_sz - off;
6202 /* Fast path for out-of-range request. */
6203 if (len <= 0) {
6204 *ap->a_len = 0;
6205 zfs_exit(zfsvfs, FTAG);
6206 return (0);
6207 }
6208
6209 error = zfs_freesp(zp, off, len, O_RDWR, TRUE);
6210 if (error == 0) {
6211 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
6212 (ap->a_ioflag & IO_SYNC) != 0)
6213 zil_commit(zilog, zp->z_id);
6214 *ap->a_offset = off + len;
6215 *ap->a_len = 0;
6216 }
6217
6218 zfs_exit(zfsvfs, FTAG);
6219 return (error);
6220 }
6221 #endif
6222
6223 #if __FreeBSD_version >= 1300039
6224 #ifndef _SYS_SYSPROTO_H_
6225 struct vop_copy_file_range_args {
6226 struct vnode *a_invp;
6227 off_t *a_inoffp;
6228 struct vnode *a_outvp;
6229 off_t *a_outoffp;
6230 size_t *a_lenp;
6231 unsigned int a_flags;
6232 struct ucred *a_incred;
6233 struct ucred *a_outcred;
6234 struct thread *a_fsizetd;
6235 };
6236 #endif
6237 /*
6238 * TODO: FreeBSD will only call the file system-specific copy_file_range()
6239 * if both files reside under the same mountpoint.  In the case of ZFS we
6240 * want to be called even if the files are in different datasets (but within
6241 * the same pool; we need to check that ourselves).
6242 */
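/*
 * When block cloning is enabled and the destination pool has the
 * block_cloning feature, try to clone the range with zfs_clone_range().
 * If cloning is disabled, the vnodes cannot be locked safely, or the clone
 * attempt fails with EXDEV, EAGAIN, EINVAL or EOPNOTSUPP, fall back to
 * vn_generic_copy_file_range().
 */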
6243 static int
6244 zfs_freebsd_copy_file_range(struct vop_copy_file_range_args *ap)
6245 {
6246 zfsvfs_t *outzfsvfs;
6247 struct vnode *invp = ap->a_invp;
6248 struct vnode *outvp = ap->a_outvp;
6249 struct mount *mp;
6250 struct uio io;
6251 int error;
6252 uint64_t len = *ap->a_lenp;
6253
6254 if (!zfs_bclone_enabled) {
6255 mp = NULL;
6256 goto bad_write_fallback;
6257 }
6258
6259 /*
6260 * TODO: If the offset/length is not aligned to the recordsize, use
6261 * vn_generic_copy_file_range() on that fragment.
6262 * It would be better to do this after we lock the vnodes, but then we
6263 * would need something other than vn_generic_copy_file_range().
6264 */
6265
6266 vn_start_write(outvp, &mp, V_WAIT);
6267 if (__predict_true(mp == outvp->v_mount)) {
6268 outzfsvfs = (zfsvfs_t *)mp->mnt_data;
6269 if (!spa_feature_is_enabled(dmu_objset_spa(outzfsvfs->z_os),
6270 SPA_FEATURE_BLOCK_CLONING)) {
6271 goto bad_write_fallback;
6272 }
6273 }
6274 if (invp == outvp) {
6275 if (vn_lock(outvp, LK_EXCLUSIVE) != 0) {
6276 goto bad_write_fallback;
6277 }
6278 } else {
6279 #if (__FreeBSD_version >= 1302506 && __FreeBSD_version < 1400000) || \
6280 __FreeBSD_version >= 1400086
6281 vn_lock_pair(invp, false, LK_EXCLUSIVE, outvp, false,
6282 LK_EXCLUSIVE);
6283 #else
6284 vn_lock_pair(invp, false, outvp, false);
6285 #endif
6286 if (VN_IS_DOOMED(invp) || VN_IS_DOOMED(outvp)) {
6287 goto bad_locked_fallback;
6288 }
6289 }
6290
6291 #ifdef MAC
6292 error = mac_vnode_check_write(curthread->td_ucred, ap->a_outcred,
6293 outvp);
6294 if (error != 0)
6295 goto out_locked;
6296 #endif
6297
6298 io.uio_offset = *ap->a_outoffp;
6299 io.uio_resid = *ap->a_lenp;
6300 error = vn_rlimit_fsize(outvp, &io, ap->a_fsizetd);
6301 if (error != 0)
6302 goto out_locked;
6303
6304 error = zfs_clone_range(VTOZ(invp), ap->a_inoffp, VTOZ(outvp),
6305 ap->a_outoffp, &len, ap->a_outcred);
6306 if (error == EXDEV || error == EAGAIN || error == EINVAL ||
6307 error == EOPNOTSUPP)
6308 goto bad_locked_fallback;
6309 *ap->a_lenp = (size_t)len;
6310 out_locked:
6311 if (invp != outvp)
6312 VOP_UNLOCK(invp);
6313 VOP_UNLOCK(outvp);
6314 if (mp != NULL)
6315 vn_finished_write(mp);
6316 return (error);
6317
6318 bad_locked_fallback:
6319 if (invp != outvp)
6320 VOP_UNLOCK(invp);
6321 VOP_UNLOCK(outvp);
6322 bad_write_fallback:
6323 if (mp != NULL)
6324 vn_finished_write(mp);
6325 error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
6326 ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags,
6327 ap->a_incred, ap->a_outcred, ap->a_fsizetd);
6328 return (error);
6329 }
6330 #endif
6331
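/*
 * Three vnode operation tables are exported below: zfs_vnodeops for regular
 * ZFS vnodes, zfs_fifoops for FIFOs (data I/O goes through fifo_specops, so
 * vop_read/vop_write panic if reached), and zfs_shareops for the special
 * hidden share files.
 */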
6332 struct vop_vector zfs_vnodeops;
6333 struct vop_vector zfs_fifoops;
6334 struct vop_vector zfs_shareops;
6335
6336 struct vop_vector zfs_vnodeops = {
6337 .vop_default = &default_vnodeops,
6338 .vop_inactive = zfs_freebsd_inactive,
6339 #if __FreeBSD_version >= 1300042
6340 .vop_need_inactive = zfs_freebsd_need_inactive,
6341 #endif
6342 .vop_reclaim = zfs_freebsd_reclaim,
6343 #if __FreeBSD_version >= 1300102
6344 .vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
6345 #endif
6346 #if __FreeBSD_version >= 1300139
6347 .vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
6348 #endif
6349 .vop_access = zfs_freebsd_access,
6350 .vop_allocate = VOP_EINVAL,
6351 #if __FreeBSD_version >= 1400032
6352 .vop_deallocate = zfs_deallocate,
6353 #endif
6354 .vop_lookup = zfs_cache_lookup,
6355 .vop_cachedlookup = zfs_freebsd_cachedlookup,
6356 .vop_getattr = zfs_freebsd_getattr,
6357 .vop_setattr = zfs_freebsd_setattr,
6358 .vop_create = zfs_freebsd_create,
6359 .vop_mknod = (vop_mknod_t *)zfs_freebsd_create,
6360 .vop_mkdir = zfs_freebsd_mkdir,
6361 .vop_readdir = zfs_freebsd_readdir,
6362 .vop_fsync = zfs_freebsd_fsync,
6363 .vop_open = zfs_freebsd_open,
6364 .vop_close = zfs_freebsd_close,
6365 .vop_rmdir = zfs_freebsd_rmdir,
6366 .vop_ioctl = zfs_freebsd_ioctl,
6367 .vop_link = zfs_freebsd_link,
6368 .vop_symlink = zfs_freebsd_symlink,
6369 .vop_readlink = zfs_freebsd_readlink,
6370 .vop_read = zfs_freebsd_read,
6371 .vop_write = zfs_freebsd_write,
6372 .vop_remove = zfs_freebsd_remove,
6373 .vop_rename = zfs_freebsd_rename,
6374 .vop_pathconf = zfs_freebsd_pathconf,
6375 .vop_bmap = zfs_freebsd_bmap,
6376 .vop_fid = zfs_freebsd_fid,
6377 .vop_getextattr = zfs_getextattr,
6378 .vop_deleteextattr = zfs_deleteextattr,
6379 .vop_setextattr = zfs_setextattr,
6380 .vop_listextattr = zfs_listextattr,
6381 .vop_getacl = zfs_freebsd_getacl,
6382 .vop_setacl = zfs_freebsd_setacl,
6383 .vop_aclcheck = zfs_freebsd_aclcheck,
6384 .vop_getpages = zfs_freebsd_getpages,
6385 .vop_putpages = zfs_freebsd_putpages,
6386 .vop_vptocnp = zfs_vptocnp,
6387 #if __FreeBSD_version >= 1300064
6388 .vop_lock1 = vop_lock,
6389 .vop_unlock = vop_unlock,
6390 .vop_islocked = vop_islocked,
6391 #endif
6392 #if __FreeBSD_version >= 1400043
6393 .vop_add_writecount = vop_stdadd_writecount_nomsync,
6394 #endif
6395 #if __FreeBSD_version >= 1300039
6396 .vop_copy_file_range = zfs_freebsd_copy_file_range,
6397 #endif
6398 };
6399 VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
6400
6401 struct vop_vector zfs_fifoops = {
6402 .vop_default = &fifo_specops,
6403 .vop_fsync = zfs_freebsd_fsync,
6404 #if __FreeBSD_version >= 1300102
6405 .vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
6406 #endif
6407 #if __FreeBSD_version >= 1300139
6408 .vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
6409 #endif
6410 .vop_access = zfs_freebsd_access,
6411 .vop_getattr = zfs_freebsd_getattr,
6412 .vop_inactive = zfs_freebsd_inactive,
6413 .vop_read = VOP_PANIC,
6414 .vop_reclaim = zfs_freebsd_reclaim,
6415 .vop_setattr = zfs_freebsd_setattr,
6416 .vop_write = VOP_PANIC,
6417 .vop_pathconf = zfs_freebsd_pathconf,
6418 .vop_fid = zfs_freebsd_fid,
6419 .vop_getacl = zfs_freebsd_getacl,
6420 .vop_setacl = zfs_freebsd_setacl,
6421 .vop_aclcheck = zfs_freebsd_aclcheck,
6422 #if __FreeBSD_version >= 1400043
6423 .vop_add_writecount = vop_stdadd_writecount_nomsync,
6424 #endif
6425 };
6426 VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
6427
6428 /*
6429 * Vnode operations template for the special hidden share files.
6430 */
6431 struct vop_vector zfs_shareops = {
6432 .vop_default = &default_vnodeops,
6433 #if __FreeBSD_version >= 1300121
6434 .vop_fplookup_vexec = VOP_EAGAIN,
6435 #endif
6436 #if __FreeBSD_version >= 1300139
6437 .vop_fplookup_symlink = VOP_EAGAIN,
6438 #endif
6439 .vop_access = zfs_freebsd_access,
6440 .vop_inactive = zfs_freebsd_inactive,
6441 .vop_reclaim = zfs_freebsd_reclaim,
6442 .vop_fid = zfs_freebsd_fid,
6443 .vop_pathconf = zfs_freebsd_pathconf,
6444 #if __FreeBSD_version >= 1400043
6445 .vop_add_writecount = vop_stdadd_writecount_nomsync,
6446 #endif
6447 };
6448 VFS_VOP_VECTOR_REGISTER(zfs_shareops);
6449
6450 ZFS_MODULE_PARAM(zfs, zfs_, xattr_compat, INT, ZMOD_RW,
6451 "Use legacy ZFS xattr naming for writing new user namespace xattrs");