module/zfs/zfs_vnops.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc.
27 */
28
29 /* Portions Copyright 2007 Jeremy Teo */
30 /* Portions Copyright 2010 Robert Milkowski */
31
32
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/resource.h>
39 #include <sys/vfs.h>
40 #include <sys/vfs_opreg.h>
41 #include <sys/file.h>
42 #include <sys/stat.h>
43 #include <sys/kmem.h>
44 #include <sys/taskq.h>
45 #include <sys/uio.h>
46 #include <sys/vmsystm.h>
47 #include <sys/atomic.h>
48 #include <vm/pvn.h>
49 #include <sys/pathname.h>
50 #include <sys/cmn_err.h>
51 #include <sys/errno.h>
52 #include <sys/unistd.h>
53 #include <sys/zfs_dir.h>
54 #include <sys/zfs_acl.h>
55 #include <sys/zfs_ioctl.h>
56 #include <sys/fs/zfs.h>
57 #include <sys/dmu.h>
58 #include <sys/dmu_objset.h>
59 #include <sys/spa.h>
60 #include <sys/txg.h>
61 #include <sys/dbuf.h>
62 #include <sys/zap.h>
63 #include <sys/sa.h>
64 #include <sys/dirent.h>
65 #include <sys/policy.h>
66 #include <sys/sunddi.h>
67 #include <sys/sid.h>
68 #include <sys/mode.h>
69 #include "fs/fs_subr.h"
70 #include <sys/zfs_ctldir.h>
71 #include <sys/zfs_fuid.h>
72 #include <sys/zfs_sa.h>
73 #include <sys/zfs_vnops.h>
74 #include <sys/dnlc.h>
75 #include <sys/zfs_rlock.h>
76 #include <sys/extdirent.h>
77 #include <sys/kidmap.h>
78 #include <sys/cred.h>
79 #include <sys/attr.h>
80 #include <sys/zpl.h>
81
82 /*
83 * Programming rules.
84 *
85 * Each vnode op performs some logical unit of work. To do this, the ZPL must
86 * properly lock its in-core state, create a DMU transaction, do the work,
87 * record this work in the intent log (ZIL), commit the DMU transaction,
88 * and wait for the intent log to commit if it is a synchronous operation.
89 * Moreover, the vnode ops must work in both normal and log replay context.
90 * The ordering of events is important to avoid deadlocks and references
91 * to freed memory. The example below illustrates the following Big Rules:
92 *
93 * (1) A check must be made in each zfs thread for a mounted file system.
94 * This is done, while avoiding races, using ZFS_ENTER(zfsvfs).
95 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
96 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
97 * can cause the calling function to return EIO.
98 *
99 * (2) iput() should always be the last thing except for zil_commit()
100 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
101 * First, if it's the last reference, the vnode/znode
102 * can be freed, so the zp may point to freed memory. Second, the last
103 * reference will call zfs_zinactive(), which may induce a lot of work --
104 * pushing cached pages (which acquires range locks) and syncing out
105 * cached atime changes. Third, zfs_zinactive() may require a new tx,
106 * which could deadlock the system if you were already holding one.
107 * If you must call iput() within a tx then use zfs_iput_async().
108 *
109 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
110 * as they can span dmu_tx_assign() calls.
111 *
112 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
113 * dmu_tx_assign(). This is critical because we don't want to block
114 * while holding locks.
115 *
116 * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
117 * reduces lock contention and CPU usage when we must wait (note that if
118 * throughput is constrained by the storage, nearly every transaction
119 * must wait).
120 *
121 * Note, in particular, that if a lock is sometimes acquired before
122 * the tx is assigned, and sometimes after (e.g. z_lock), then failing
123 * to use a non-blocking assign can deadlock the system. The scenario:
124 *
125 * Thread A has grabbed a lock before calling dmu_tx_assign().
126 * Thread B is in an already-assigned tx, and blocks for this lock.
127 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
128 * forever, because the previous txg can't quiesce until B's tx commits.
129 *
130 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
131 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
132 * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
133 * to indicate that this operation has already called dmu_tx_wait().
134 * This will ensure that we don't retry forever, waiting a short bit
135 * each time.
136 *
137 * (5) If the operation succeeded, generate the intent log entry for it
138 * before dropping locks. This ensures that the ordering of events
139 * in the intent log matches the order in which they actually occurred.
140 * During ZIL replay the zfs_log_* functions will update the sequence
141 * number to indicate the zil transaction has replayed.
142 *
143 * (6) At the end of each vnode op, the DMU tx must always commit,
144 * regardless of whether there were any errors.
145 *
146 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
147 * to ensure that synchronous semantics are provided when necessary.
148 *
149 * In general, this is how things should be ordered in each vnode op:
150 *
151 * ZFS_ENTER(zfsvfs); // exit if unmounted
152 * top:
153 * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
154 * rw_enter(...); // grab any other locks you need
155 * tx = dmu_tx_create(...); // get DMU tx
156 * dmu_tx_hold_*(); // hold each object you might modify
157 * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
158 * if (error) {
159 * rw_exit(...); // drop locks
160 * zfs_dirent_unlock(dl); // unlock directory entry
161 * iput(...); // release held vnodes
162 * if (error == ERESTART) {
163 * waited = B_TRUE;
164 * dmu_tx_wait(tx);
165 * dmu_tx_abort(tx);
166 * goto top;
167 * }
168 * dmu_tx_abort(tx); // abort DMU tx
169 * ZFS_EXIT(zfsvfs); // finished in zfs
170 * return (error); // really out of space
171 * }
172 * error = do_real_work(); // do whatever this VOP does
173 * if (error == 0)
174 * zfs_log_*(...); // on success, make ZIL entry
175 * dmu_tx_commit(tx); // commit DMU tx -- error or not
176 * rw_exit(...); // drop locks
177 * zfs_dirent_unlock(dl); // unlock directory entry
178 * iput(...); // release held vnodes
179 * zil_commit(zilog, foid); // synchronous when necessary
180 * ZFS_EXIT(zfsvfs); // finished in zfs
181 * return (error); // done, report error
182 */
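
/*
 * A minimal sketch (hypothetical, not compiled) of the tx-assign retry
 * pattern from rule (4) and the ordering example above, reduced to its
 * skeleton. The function name and the "real work" step are
 * placeholders; the ZFS_ENTER/dmu_tx_* calls are the ones used
 * throughout this file.
 */
#if 0
static int
zfs_example_op(zfsvfs_t *zfsvfs, znode_t *zp)
{
	boolean_t waited = B_FALSE;
	dmu_tx_t *tx;
	int error;

	ZFS_ENTER(zfsvfs);
top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			/* Wait with no locks held, then retry once. */
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);	/* really out of space */
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/* do the real work under the assigned tx here */
	dmu_tx_commit(tx);
	ZFS_EXIT(zfsvfs);
	return (0);
}
#endif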
183
184 /*
185 * Virus scanning is unsupported. It would be possible to add a hook
186 * here to perform the required virus scan. This could be done
187 * entirely in the kernel or potentially by invoking an external
188 * scanning utility.
189 */
190 static int
191 zfs_vscan(struct inode *ip, cred_t *cr, int async)
192 {
193 return (0);
194 }
195
196 /* ARGSUSED */
197 int
198 zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
199 {
200 znode_t *zp = ITOZ(ip);
201 zfsvfs_t *zfsvfs = ITOZSB(ip);
202
203 ZFS_ENTER(zfsvfs);
204 ZFS_VERIFY_ZP(zp);
205
206 /* Honor ZFS_APPENDONLY file attribute */
207 if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
208 ((flag & O_APPEND) == 0)) {
209 ZFS_EXIT(zfsvfs);
210 return (SET_ERROR(EPERM));
211 }
212
213 /* Virus scan eligible files on open */
214 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
215 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
216 if (zfs_vscan(ip, cr, 0) != 0) {
217 ZFS_EXIT(zfsvfs);
218 return (SET_ERROR(EACCES));
219 }
220 }
221
222 /* Keep a count of the synchronous opens in the znode */
223 if (flag & O_SYNC)
224 atomic_inc_32(&zp->z_sync_cnt);
225
226 ZFS_EXIT(zfsvfs);
227 return (0);
228 }
229
230 /* ARGSUSED */
231 int
232 zfs_close(struct inode *ip, int flag, cred_t *cr)
233 {
234 znode_t *zp = ITOZ(ip);
235 zfsvfs_t *zfsvfs = ITOZSB(ip);
236
237 ZFS_ENTER(zfsvfs);
238 ZFS_VERIFY_ZP(zp);
239
240 /* Decrement the synchronous opens in the znode */
241 if (flag & O_SYNC)
242 atomic_dec_32(&zp->z_sync_cnt);
243
244 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
245 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
246 VERIFY(zfs_vscan(ip, cr, 1) == 0);
247
248 ZFS_EXIT(zfsvfs);
249 return (0);
250 }
251
252 #if defined(SEEK_HOLE) && defined(SEEK_DATA)
253 /*
254 * Lseek support for finding holes (cmd == SEEK_HOLE) and
255 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
256 */
257 static int
258 zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
259 {
260 znode_t *zp = ITOZ(ip);
261 uint64_t noff = (uint64_t)*off; /* new offset */
262 uint64_t file_sz;
263 int error;
264 boolean_t hole;
265
266 file_sz = zp->z_size;
267 if (noff >= file_sz) {
268 return (SET_ERROR(ENXIO));
269 }
270
271 if (cmd == SEEK_HOLE)
272 hole = B_TRUE;
273 else
274 hole = B_FALSE;
275
276 error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
277
278 if (error == ESRCH)
279 return (SET_ERROR(ENXIO));
280
281 /* file was dirty, so fall back to using generic logic */
282 if (error == EBUSY) {
283 if (hole)
284 *off = file_sz;
285
286 return (0);
287 }
288
289 /*
290 * We could find a hole that begins after the logical end-of-file,
291 * because dmu_offset_next() only works on whole blocks. If the
292 * EOF falls mid-block, then indicate that the "virtual hole"
293 * at the end of the file begins at the logical EOF, rather than
294 * at the end of the last block.
295 */
296 if (noff > file_sz) {
297 ASSERT(hole);
298 noff = file_sz;
299 }
300
301 if (noff < *off)
302 return (error);
303 *off = noff;
304 return (error);
305 }
306
307 int
308 zfs_holey(struct inode *ip, int cmd, loff_t *off)
309 {
310 znode_t *zp = ITOZ(ip);
311 zfsvfs_t *zfsvfs = ITOZSB(ip);
312 int error;
313
314 ZFS_ENTER(zfsvfs);
315 ZFS_VERIFY_ZP(zp);
316
317 error = zfs_holey_common(ip, cmd, off);
318
319 ZFS_EXIT(zfsvfs);
320 return (error);
321 }
322 #endif /* SEEK_HOLE && SEEK_DATA */
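
/*
 * A minimal sketch (hypothetical, not compiled) of how userspace
 * exercises the SEEK_HOLE support above through lseek(2). A file with
 * no holes reports the "virtual hole" at its logical EOF described in
 * zfs_holey_common(); at or past EOF, lseek() fails with ENXIO.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static off_t
first_hole(int fd)
{
	return (lseek(fd, 0, SEEK_HOLE));	/* -1/ENXIO on empty file */
}
#endif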
323
324 #if defined(_KERNEL)
325 /*
326 * When a file is memory mapped, we must keep the IO data synchronized
327 * between the DMU cache and the memory mapped pages. What this means:
328 *
329 * On Write: If we find a memory mapped page, we write to *both*
330 * the page and the dmu buffer.
331 */
332 static void
333 update_pages(struct inode *ip, int64_t start, int len,
334 objset_t *os, uint64_t oid)
335 {
336 struct address_space *mp = ip->i_mapping;
337 struct page *pp;
338 uint64_t nbytes;
339 int64_t off;
340 void *pb;
341
342 off = start & (PAGE_SIZE-1);
343 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
344 nbytes = MIN(PAGE_SIZE - off, len);
345
346 pp = find_lock_page(mp, start >> PAGE_SHIFT);
347 if (pp) {
348 if (mapping_writably_mapped(mp))
349 flush_dcache_page(pp);
350
351 pb = kmap(pp);
352 (void) dmu_read(os, oid, start+off, nbytes, pb+off,
353 DMU_READ_PREFETCH);
354 kunmap(pp);
355
356 if (mapping_writably_mapped(mp))
357 flush_dcache_page(pp);
358
359 mark_page_accessed(pp);
360 SetPageUptodate(pp);
361 ClearPageError(pp);
362 unlock_page(pp);
363 put_page(pp);
364 }
365
366 len -= nbytes;
367 off = 0;
368 }
369 }
370
371 /*
372 * When a file is memory mapped, we must keep the IO data synchronized
373 * between the DMU cache and the memory mapped pages. What this means:
374 *
375 * On Read: We "read" preferentially from memory mapped pages;
376 * otherwise we fall back to the dmu buffer.
377 *
378 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
379 * the file is memory mapped.
380 */
381 static int
382 mappedread(struct inode *ip, int nbytes, uio_t *uio)
383 {
384 struct address_space *mp = ip->i_mapping;
385 struct page *pp;
386 znode_t *zp = ITOZ(ip);
387 int64_t start, off;
388 uint64_t bytes;
389 int len = nbytes;
390 int error = 0;
391 void *pb;
392
393 start = uio->uio_loffset;
394 off = start & (PAGE_SIZE-1);
395 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
396 bytes = MIN(PAGE_SIZE - off, len);
397
398 pp = find_lock_page(mp, start >> PAGE_SHIFT);
399 if (pp) {
400 ASSERT(PageUptodate(pp));
401 unlock_page(pp);
402
403 pb = kmap(pp);
404 error = uiomove(pb + off, bytes, UIO_READ, uio);
405 kunmap(pp);
406
407 if (mapping_writably_mapped(mp))
408 flush_dcache_page(pp);
409
410 mark_page_accessed(pp);
411 put_page(pp);
412 } else {
413 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
414 uio, bytes);
415 }
416
417 len -= bytes;
418 off = 0;
419 if (error)
420 break;
421 }
422 return (error);
423 }
424 #endif /* _KERNEL */
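
/*
 * A minimal sketch (hypothetical, not compiled) of the coherence the
 * helpers above provide: a read(2) issued after a store through a
 * MAP_SHARED mapping must observe the new bytes, because mappedread()
 * prefers the page cache and update_pages() keeps it current on write.
 * Error handling is elided.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static char
read_after_mmap_store(int fd)
{
	char buf = 0;
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);

	p[0] = 'x';			/* store through the mapping */
	(void) pread(fd, &buf, 1, 0);	/* observes 'x' via mappedread() */
	(void) munmap(p, 4096);
	return (buf);
}
#endif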
425
426 unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
427 unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
428
429 /*
430 * Read bytes from specified file into supplied buffer.
431 *
432 * IN: ip - inode of file to be read from.
433 * uio - structure supplying read location, range info,
434 * and return buffer.
435 * ioflag - FSYNC flags; used to provide FRSYNC semantics.
436 * O_DIRECT flag; used to bypass page cache.
437 * cr - credentials of caller.
438 *
439 * OUT: uio - updated offset and range, buffer filled.
440 *
441 * RETURN: 0 on success, error code on failure.
442 *
443 * Side Effects:
444 * inode - atime updated if byte count > 0
445 */
446 /* ARGSUSED */
447 int
448 zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
449 {
450 znode_t *zp = ITOZ(ip);
451 zfsvfs_t *zfsvfs = ITOZSB(ip);
452 ssize_t n, nbytes;
453 int error = 0;
454 rl_t *rl;
455 #ifdef HAVE_UIO_ZEROCOPY
456 xuio_t *xuio = NULL;
457 #endif /* HAVE_UIO_ZEROCOPY */
458
459 ZFS_ENTER(zfsvfs);
460 ZFS_VERIFY_ZP(zp);
461
462 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
463 ZFS_EXIT(zfsvfs);
464 return (SET_ERROR(EACCES));
465 }
466
467 /*
468 * Validate file offset
469 */
470 if (uio->uio_loffset < (offset_t)0) {
471 ZFS_EXIT(zfsvfs);
472 return (SET_ERROR(EINVAL));
473 }
474
475 /*
476 * Fasttrack empty reads
477 */
478 if (uio->uio_resid == 0) {
479 ZFS_EXIT(zfsvfs);
480 return (0);
481 }
482
483 /*
484 * If we're in FRSYNC mode, sync out this znode before reading it.
485 * Only do this for non-snapshots.
486 */
487 if (zfsvfs->z_log &&
488 (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
489 zil_commit(zfsvfs->z_log, zp->z_id);
490
491 /*
492 * Lock the range against changes.
493 */
494 rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
495 RL_READER);
496
497 /*
498 * If we are reading past end-of-file we can skip
499 * to the end; but we might still need to set atime.
500 */
501 if (uio->uio_loffset >= zp->z_size) {
502 error = 0;
503 goto out;
504 }
505
506 ASSERT(uio->uio_loffset < zp->z_size);
507 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
508
509 #ifdef HAVE_UIO_ZEROCOPY
510 if ((uio->uio_extflg == UIO_XUIO) &&
511 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
512 int nblk;
513 int blksz = zp->z_blksz;
514 uint64_t offset = uio->uio_loffset;
515
516 xuio = (xuio_t *)uio;
517 if ((ISP2(blksz))) {
518 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
519 blksz)) / blksz;
520 } else {
521 ASSERT(offset + n <= blksz);
522 nblk = 1;
523 }
524 (void) dmu_xuio_init(xuio, nblk);
525
526 if (vn_has_cached_data(ip)) {
527 /*
528 * For simplicity, we always allocate a full buffer
529 * even if we only expect to read a portion of a block.
530 */
531 while (--nblk >= 0) {
532 (void) dmu_xuio_add(xuio,
533 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
534 blksz), 0, blksz);
535 }
536 }
537 }
538 #endif /* HAVE_UIO_ZEROCOPY */
539
540 while (n > 0) {
541 nbytes = MIN(n, zfs_read_chunk_size -
542 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
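		/*
		 * Worked example (illustrative): with the default 1M
		 * chunk size, a read starting at offset 1.5M copies
		 * 512K first, so subsequent chunks realign to 1M
		 * boundaries.
		 */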
543
544 if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
545 error = mappedread(ip, nbytes, uio);
546 } else {
547 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
548 uio, nbytes);
549 }
550
551 if (error) {
552 /* convert checksum errors into IO errors */
553 if (error == ECKSUM)
554 error = SET_ERROR(EIO);
555 break;
556 }
557
558 n -= nbytes;
559 }
560 out:
561 zfs_range_unlock(rl);
562
563 ZFS_EXIT(zfsvfs);
564 return (error);
565 }
566
567 /*
568 * Write the bytes to a file.
569 *
570 * IN: ip - inode of file to be written to.
571 * uio - structure supplying write location, range info,
572 * and data buffer.
573 * ioflag - FAPPEND flag set if in append mode.
574 * O_DIRECT flag; used to bypass page cache.
575 * cr - credentials of caller.
576 *
577 * OUT: uio - updated offset and range.
578 *
579 * RETURN: 0 on success,
580 * error code on failure
581 *
582 * Timestamps:
583 * ip - ctime|mtime updated if byte count > 0
584 */
585
586 /* ARGSUSED */
587 int
588 zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
589 {
590 znode_t *zp = ITOZ(ip);
591 rlim64_t limit = uio->uio_limit;
592 ssize_t start_resid = uio->uio_resid;
593 ssize_t tx_bytes;
594 uint64_t end_size;
595 dmu_tx_t *tx;
596 zfsvfs_t *zfsvfs = ZTOZSB(zp);
597 zilog_t *zilog;
598 offset_t woff;
599 ssize_t n, nbytes;
600 rl_t *rl;
601 int max_blksz = zfsvfs->z_max_blksz;
602 int error = 0;
603 arc_buf_t *abuf;
604 const iovec_t *aiov = NULL;
605 xuio_t *xuio = NULL;
606 int write_eof;
607 int count = 0;
608 sa_bulk_attr_t bulk[4];
609 uint64_t mtime[2], ctime[2];
610 uint32_t uid;
611 #ifdef HAVE_UIO_ZEROCOPY
612 int i_iov = 0;
613 const iovec_t *iovp = uio->uio_iov;
614 ASSERTV(int iovcnt = uio->uio_iovcnt);
615 #endif
616
617 /*
618 * Fasttrack empty write
619 */
620 n = start_resid;
621 if (n == 0)
622 return (0);
623
624 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
625 limit = MAXOFFSET_T;
626
627 ZFS_ENTER(zfsvfs);
628 ZFS_VERIFY_ZP(zp);
629
630 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
631 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
632 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
633 &zp->z_size, 8);
634 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
635 &zp->z_pflags, 8);
636
637 /*
638 * Callers might not be able to detect properly that we are read-only,
639 * so check it explicitly here.
640 */
641 if (zfs_is_readonly(zfsvfs)) {
642 ZFS_EXIT(zfsvfs);
643 return (SET_ERROR(EROFS));
644 }
645
646 /*
647 * If immutable or not appending then return EPERM
648 */
649 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
650 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
651 (uio->uio_loffset < zp->z_size))) {
652 ZFS_EXIT(zfsvfs);
653 return (SET_ERROR(EPERM));
654 }
655
656 zilog = zfsvfs->z_log;
657
658 /*
659 * Validate file offset
660 */
661 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
662 if (woff < 0) {
663 ZFS_EXIT(zfsvfs);
664 return (SET_ERROR(EINVAL));
665 }
666
667 /*
668 * Pre-fault the pages to ensure slow (e.g. NFS) pages
669 * don't hold up the txg.
670 * Skip this if the uio contains a loaned arc_buf.
671 */
672 #ifdef HAVE_UIO_ZEROCOPY
673 if ((uio->uio_extflg == UIO_XUIO) &&
674 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
675 xuio = (xuio_t *)uio;
676 else
677 #endif
678 uio_prefaultpages(MIN(n, max_blksz), uio);
679
680 /*
681 * If in append mode, set the io offset pointer to eof.
682 */
683 if (ioflag & FAPPEND) {
684 /*
685 * Obtain an appending range lock to guarantee file append
686 * semantics. We reset the write offset once we have the lock.
687 */
688 rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
689 woff = rl->r_off;
690 if (rl->r_len == UINT64_MAX) {
691 /*
692 * We overlocked the file because this write will cause
693 * the file block size to increase.
694 * Note that z_size cannot change with this lock held.
695 */
696 woff = zp->z_size;
697 }
698 uio->uio_loffset = woff;
699 } else {
700 /*
701 * Note that if the file block size will change as a result of
702 * this write, then this range lock will lock the entire file
703 * so that we can re-write the block safely.
704 */
705 rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);
706 }
707
708 if (woff >= limit) {
709 zfs_range_unlock(rl);
710 ZFS_EXIT(zfsvfs);
711 return (SET_ERROR(EFBIG));
712 }
713
714 if ((woff + n) > limit || woff > (limit - n))
715 n = limit - woff;
716
717 /* Will this write extend the file length? */
718 write_eof = (woff + n > zp->z_size);
719
720 end_size = MAX(zp->z_size, woff + n);
721
722 /*
723 * Write the file in reasonable size chunks. Each chunk is written
724 * in a separate transaction; this keeps the intent log records small
725 * and allows us to do more fine-grained space accounting.
726 */
727 while (n > 0) {
728 abuf = NULL;
729 woff = uio->uio_loffset;
730 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
731 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
732 if (abuf != NULL)
733 dmu_return_arcbuf(abuf);
734 error = SET_ERROR(EDQUOT);
735 break;
736 }
737
738 if (xuio && abuf == NULL) {
739 #ifdef HAVE_UIO_ZEROCOPY
740 ASSERT(i_iov < iovcnt);
741 ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
742 aiov = &iovp[i_iov];
743 abuf = dmu_xuio_arcbuf(xuio, i_iov);
744 dmu_xuio_clear(xuio, i_iov);
745 ASSERT((aiov->iov_base == abuf->b_data) ||
746 ((char *)aiov->iov_base - (char *)abuf->b_data +
747 aiov->iov_len == arc_buf_size(abuf)));
748 i_iov++;
749 #endif
750 } else if (abuf == NULL && n >= max_blksz &&
751 woff >= zp->z_size &&
752 P2PHASE(woff, max_blksz) == 0 &&
753 zp->z_blksz == max_blksz) {
754 /*
755 * This write covers a full block. "Borrow" a buffer
756 * from the dmu so that we can fill it before we enter
757 * a transaction. This avoids the possibility of
758 * holding up the transaction if the data copy hangs
759 * up on a pagefault (e.g., from an NFS server mapping).
760 */
761 size_t cbytes;
762
763 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
764 max_blksz);
765 ASSERT(abuf != NULL);
766 ASSERT(arc_buf_size(abuf) == max_blksz);
767 if ((error = uiocopy(abuf->b_data, max_blksz,
768 UIO_WRITE, uio, &cbytes))) {
769 dmu_return_arcbuf(abuf);
770 break;
771 }
772 ASSERT(cbytes == max_blksz);
773 }
774
775 /*
776 * Start a transaction.
777 */
778 tx = dmu_tx_create(zfsvfs->z_os);
779 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
780 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
781 zfs_sa_upgrade_txholds(tx, zp);
782 error = dmu_tx_assign(tx, TXG_WAIT);
783 if (error) {
784 dmu_tx_abort(tx);
785 if (abuf != NULL)
786 dmu_return_arcbuf(abuf);
787 break;
788 }
789
790 /*
791 * If zfs_range_lock() over-locked we grow the blocksize
792 * and then reduce the lock range. This will only happen
793 * on the first iteration since zfs_range_reduce() will
794 * shrink down r_len to the appropriate size.
795 */
796 if (rl->r_len == UINT64_MAX) {
797 uint64_t new_blksz;
798
799 if (zp->z_blksz > max_blksz) {
800 /*
801 * File's blocksize is already larger than the
802 * "recordsize" property. Only let it grow to
803 * the next power of 2.
804 */
805 ASSERT(!ISP2(zp->z_blksz));
806 new_blksz = MIN(end_size,
807 1 << highbit64(zp->z_blksz));
808 } else {
809 new_blksz = MIN(end_size, max_blksz);
810 }
811 zfs_grow_blocksize(zp, new_blksz, tx);
812 zfs_range_reduce(rl, woff, n);
813 }
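		/*
		 * Worked example (illustrative): if "recordsize" was
		 * lowered after the file grew, z_blksz can be a
		 * non-power-of-2 such as 96K; highbit64(0x18000) == 17,
		 * so the block may grow only to 1 << 17 == 128K (capped
		 * at end_size), never beyond the next power of 2.
		 */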
814
815 /*
816 * XXX - should we really limit each write to z_max_blksz?
817 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
818 */
819 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
820
821 if (abuf == NULL) {
822 tx_bytes = uio->uio_resid;
823 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
824 uio, nbytes, tx);
825 tx_bytes -= uio->uio_resid;
826 } else {
827 tx_bytes = nbytes;
828 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
829 /*
830 * If this is not a full block write, but we are
831 * extending the file past EOF and this data starts
832 * block-aligned, use assign_arcbuf(). Otherwise,
833 * write via dmu_write().
834 */
835 if (tx_bytes < max_blksz && (!write_eof ||
836 aiov->iov_base != abuf->b_data)) {
837 ASSERT(xuio);
838 dmu_write(zfsvfs->z_os, zp->z_id, woff,
839 /* cppcheck-suppress nullPointer */
840 aiov->iov_len, aiov->iov_base, tx);
841 dmu_return_arcbuf(abuf);
842 xuio_stat_wbuf_copied();
843 } else {
844 ASSERT(xuio || tx_bytes == max_blksz);
845 dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
846 woff, abuf, tx);
847 }
848 ASSERT(tx_bytes <= uio->uio_resid);
849 uioskip(uio, tx_bytes);
850 }
851 if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
852 update_pages(ip, woff,
853 tx_bytes, zfsvfs->z_os, zp->z_id);
854 }
855
856 /*
857 * If we made no progress, we're done. If we made even
858 * partial progress, update the znode and ZIL accordingly.
859 */
860 if (tx_bytes == 0) {
861 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
862 (void *)&zp->z_size, sizeof (uint64_t), tx);
863 dmu_tx_commit(tx);
864 ASSERT(error != 0);
865 break;
866 }
867
868 /*
869 * Clear Set-UID/Set-GID bits on successful write if not
870 * privileged and at least one of the execute bits is set.
871 *
872 * It would be nice to do this after all writes have
873 * been done, but that would still expose the ISUID/ISGID
874 * to another app after the partial write is committed.
875 *
876 * Note: we don't call zfs_fuid_map_id() here because
877 * user 0 is not an ephemeral uid.
878 */
879 mutex_enter(&zp->z_acl_lock);
880 uid = KUID_TO_SUID(ip->i_uid);
881 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
882 (S_IXUSR >> 6))) != 0 &&
883 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
884 secpolicy_vnode_setid_retain(cr,
885 ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
886 uint64_t newmode;
887 zp->z_mode &= ~(S_ISUID | S_ISGID);
888 ip->i_mode = newmode = zp->z_mode;
889 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
890 (void *)&newmode, sizeof (uint64_t), tx);
891 }
892 mutex_exit(&zp->z_acl_lock);
893
894 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
895
896 /*
897 * Update the file size (z_size) if it has changed;
898 * account for possible concurrent updates.
899 */
900 while ((end_size = zp->z_size) < uio->uio_loffset) {
901 (void) atomic_cas_64(&zp->z_size, end_size,
902 uio->uio_loffset);
903 ASSERT(error == 0);
904 }
905 /*
906 * If we are replaying and eof is non-zero then force
907 * the file size to the specified eof. Note, there's no
908 * concurrency during replay.
909 */
910 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
911 zp->z_size = zfsvfs->z_replay_eof;
912
913 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
914
915 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
916 NULL, NULL);
917 dmu_tx_commit(tx);
918
919 if (error != 0)
920 break;
921 ASSERT(tx_bytes == nbytes);
922 n -= nbytes;
923
924 if (!xuio && n > 0)
925 uio_prefaultpages(MIN(n, max_blksz), uio);
926 }
927
928 zfs_inode_update(zp);
929 zfs_range_unlock(rl);
930
931 /*
932 * If we're in replay mode, or we made no progress, return error.
933 * Otherwise, it's at least a partial write, so it's successful.
934 */
935 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
936 ZFS_EXIT(zfsvfs);
937 return (error);
938 }
939
940 if (ioflag & (FSYNC | FDSYNC) ||
941 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
942 zil_commit(zilog, zp->z_id);
943
944 ZFS_EXIT(zfsvfs);
945 return (0);
946 }
947
948 /*
949 * Drop a reference on the passed inode asynchronously. This ensures
950 * that the caller will never drop the last reference on an inode in
951 * the current context. Doing so while holding open a tx could result
952 * in a deadlock if iput_final() re-enters the filesystem code.
953 */
954 void
955 zfs_iput_async(struct inode *ip)
956 {
957 objset_t *os = ITOZSB(ip)->z_os;
958
959 ASSERT(atomic_read(&ip->i_count) > 0);
960 ASSERT(os != NULL);
961
962 if (atomic_read(&ip->i_count) == 1)
963 VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
964 (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
965 else
966 iput(ip);
967 }
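
/*
 * A minimal sketch (hypothetical, not compiled) of why a caller holding
 * an open tx must use zfs_iput_async() rather than iput(), per rule (2)
 * of the programming rules above.
 */
#if 0
static void
zfs_example_drop_in_tx(zfsvfs_t *zfsvfs, struct inode *ip)
{
	dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	/*
	 * A plain iput(ip) here could drop the last reference;
	 * iput_final() may then re-enter ZFS and need a new tx,
	 * deadlocking against the tx we already hold. Deferring the
	 * final drop to the iput taskq avoids this.
	 */
	zfs_iput_async(ip);
	dmu_tx_commit(tx);
}
#endif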
968
969 void
970 zfs_get_done(zgd_t *zgd, int error)
971 {
972 znode_t *zp = zgd->zgd_private;
973
974 if (zgd->zgd_db)
975 dmu_buf_rele(zgd->zgd_db, zgd);
976
977 zfs_range_unlock(zgd->zgd_rl);
978
979 /*
980 * Release the vnode asynchronously as we currently have the
981 * txg stopped from syncing.
982 */
983 zfs_iput_async(ZTOI(zp));
984
985 if (error == 0 && zgd->zgd_bp)
986 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
987
988 kmem_free(zgd, sizeof (zgd_t));
989 }
990
991 #ifdef DEBUG
992 static int zil_fault_io = 0;
993 #endif
994
995 /*
996 * Get data to generate a TX_WRITE intent log record.
997 */
998 int
999 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1000 {
1001 zfsvfs_t *zfsvfs = arg;
1002 objset_t *os = zfsvfs->z_os;
1003 znode_t *zp;
1004 uint64_t object = lr->lr_foid;
1005 uint64_t offset = lr->lr_offset;
1006 uint64_t size = lr->lr_length;
1007 dmu_buf_t *db;
1008 zgd_t *zgd;
1009 int error = 0;
1010
1011 ASSERT(zio != NULL);
1012 ASSERT(size != 0);
1013
1014 /*
1015 * Nothing to do if the file has been removed
1016 */
1017 if (zfs_zget(zfsvfs, object, &zp) != 0)
1018 return (SET_ERROR(ENOENT));
1019 if (zp->z_unlinked) {
1020 /*
1021 * Release the vnode asynchronously as we currently have the
1022 * txg stopped from syncing.
1023 */
1024 zfs_iput_async(ZTOI(zp));
1025 return (SET_ERROR(ENOENT));
1026 }
1027
1028 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1029 zgd->zgd_zilog = zfsvfs->z_log;
1030 zgd->zgd_private = zp;
1031
1032 /*
1033 * Write records come in two flavors: immediate and indirect.
1034 * For small writes it's cheaper to store the data with the
1035 * log record (immediate); for large writes it's cheaper to
1036 * sync the data and get a pointer to it (indirect) so that
1037 * we don't have to write the data twice.
1038 */
1039 if (buf != NULL) { /* immediate write */
1040 zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,
1041 RL_READER);
1042 /* test for truncation needs to be done while range locked */
1043 if (offset >= zp->z_size) {
1044 error = SET_ERROR(ENOENT);
1045 } else {
1046 error = dmu_read(os, object, offset, size, buf,
1047 DMU_READ_NO_PREFETCH);
1048 }
1049 ASSERT(error == 0 || error == ENOENT);
1050 } else { /* indirect write */
1051 /*
1052 * Have to lock the whole block to ensure that no one can
1053 * change the data while it is written out and its checksum
1054 * is being calculated. We need to re-check the blocksize
1055 * after we get the lock in case it has changed!
1056 */
1057 for (;;) {
1058 uint64_t blkoff;
1059 size = zp->z_blksz;
1060 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1061 offset -= blkoff;
1062 zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
1063 size, RL_READER);
1064 if (zp->z_blksz == size)
1065 break;
1066 offset += blkoff;
1067 zfs_range_unlock(zgd->zgd_rl);
1068 }
1069 /* test for truncation needs to be done while range locked */
1070 if (lr->lr_offset >= zp->z_size)
1071 error = SET_ERROR(ENOENT);
1072 #ifdef DEBUG
1073 if (zil_fault_io) {
1074 error = SET_ERROR(EIO);
1075 zil_fault_io = 0;
1076 }
1077 #endif
1078 if (error == 0)
1079 error = dmu_buf_hold(os, object, offset, zgd, &db,
1080 DMU_READ_NO_PREFETCH);
1081
1082 if (error == 0) {
1083 blkptr_t *bp = &lr->lr_blkptr;
1084
1085 zgd->zgd_db = db;
1086 zgd->zgd_bp = bp;
1087
1088 ASSERT(db->db_offset == offset);
1089 ASSERT(db->db_size == size);
1090
1091 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1092 zfs_get_done, zgd);
1093 ASSERT(error || lr->lr_length <= size);
1094
1095 /*
1096 * On success, we need to wait for the write I/O
1097 * initiated by dmu_sync() to complete before we can
1098 * release this dbuf. We will finish everything up
1099 * in the zfs_get_done() callback.
1100 */
1101 if (error == 0)
1102 return (0);
1103
1104 if (error == EALREADY) {
1105 lr->lr_common.lrc_txtype = TX_WRITE2;
1106 error = 0;
1107 }
1108 }
1109 }
1110
1111 zfs_get_done(zgd, error);
1112
1113 return (error);
1114 }
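
/*
 * Worked example (illustrative) of the indirect-write alignment loop in
 * zfs_get_data() above: the whole block containing the write is range
 * locked, so the offset is first rounded down to its block's start.
 */
#if 0
	uint64_t size = 0x20000;			/* zp->z_blksz = 128K */
	uint64_t offset = 0x25000;			/* lr->lr_offset */
	uint64_t blkoff = P2PHASE(offset, size);	/* 0x5000 */

	offset -= blkoff;	/* 0x20000: the block start that gets locked */
	/* the range lock then covers [0x20000, 0x40000) */
#endif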
1115
1116 /*ARGSUSED*/
1117 int
1118 zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
1119 {
1120 znode_t *zp = ITOZ(ip);
1121 zfsvfs_t *zfsvfs = ITOZSB(ip);
1122 int error;
1123
1124 ZFS_ENTER(zfsvfs);
1125 ZFS_VERIFY_ZP(zp);
1126
1127 if (flag & V_ACE_MASK)
1128 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1129 else
1130 error = zfs_zaccess_rwx(zp, mode, flag, cr);
1131
1132 ZFS_EXIT(zfsvfs);
1133 return (error);
1134 }
1135
1136 /*
1137 * Lookup an entry in a directory, or an extended attribute directory.
1138 * If it exists, return a held inode reference for it.
1139 *
1140 * IN: dip - inode of directory to search.
1141 * nm - name of entry to lookup.
1142 * flags - LOOKUP_XATTR set if looking for an attribute.
1143 * cr - credentials of caller.
1144 * direntflags - directory lookup flags
1145 * realpnp - returned pathname.
1146 *
1147 * OUT: ipp - inode of located entry, NULL if not found.
1148 *
1149 * RETURN: 0 on success, error code on failure.
1150 *
1151 * Timestamps:
1152 * NA
1153 */
1154 /* ARGSUSED */
1155 int
1156 zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
1157 cred_t *cr, int *direntflags, pathname_t *realpnp)
1158 {
1159 znode_t *zdp = ITOZ(dip);
1160 zfsvfs_t *zfsvfs = ITOZSB(dip);
1161 int error = 0;
1162
1163 /*
1164 * Fast path lookup, however we must skip DNLC lookup
1165 * for case folding or normalizing lookups because the
1166 * DNLC code only stores the passed-in name. This means
1167 * creating 'a' and removing 'A' on a case-insensitive
1168 * file system would work, but DNLC still thinks 'a'
1169 * exists and won't let you create it again on the next
1170 * pass through the fast path.
1171 */
1172 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1173
1174 if (!S_ISDIR(dip->i_mode)) {
1175 return (SET_ERROR(ENOTDIR));
1176 } else if (zdp->z_sa_hdl == NULL) {
1177 return (SET_ERROR(EIO));
1178 }
1179
1180 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1181 error = zfs_fastaccesschk_execute(zdp, cr);
1182 if (!error) {
1183 *ipp = dip;
1184 igrab(*ipp);
1185 return (0);
1186 }
1187 return (error);
1188 #ifdef HAVE_DNLC
1189 } else if (!zdp->z_zfsvfs->z_norm &&
1190 (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {
1191
1192 vnode_t *tvp = dnlc_lookup(dvp, nm);
1193
1194 if (tvp) {
1195 error = zfs_fastaccesschk_execute(zdp, cr);
1196 if (error) {
1197 iput(tvp);
1198 return (error);
1199 }
1200 if (tvp == DNLC_NO_VNODE) {
1201 iput(tvp);
1202 return (SET_ERROR(ENOENT));
1203 } else {
1204 *vpp = tvp;
1205 return (specvp_check(vpp, cr));
1206 }
1207 }
1208 #endif /* HAVE_DNLC */
1209 }
1210 }
1211
1212 ZFS_ENTER(zfsvfs);
1213 ZFS_VERIFY_ZP(zdp);
1214
1215 *ipp = NULL;
1216
1217 if (flags & LOOKUP_XATTR) {
1218 /*
1219 * We don't allow recursive attributes.
1220 * Maybe someday we will.
1221 */
1222 if (zdp->z_pflags & ZFS_XATTR) {
1223 ZFS_EXIT(zfsvfs);
1224 return (SET_ERROR(EINVAL));
1225 }
1226
1227 if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
1228 ZFS_EXIT(zfsvfs);
1229 return (error);
1230 }
1231
1232 /*
1233 * Do we have permission to get into the attribute directory?
1234 */
1235
1236 if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
1237 B_FALSE, cr))) {
1238 iput(*ipp);
1239 *ipp = NULL;
1240 }
1241
1242 ZFS_EXIT(zfsvfs);
1243 return (error);
1244 }
1245
1246 if (!S_ISDIR(dip->i_mode)) {
1247 ZFS_EXIT(zfsvfs);
1248 return (SET_ERROR(ENOTDIR));
1249 }
1250
1251 /*
1252 * Check accessibility of directory.
1253 */
1254
1255 if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
1256 ZFS_EXIT(zfsvfs);
1257 return (error);
1258 }
1259
1260 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1261 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1262 ZFS_EXIT(zfsvfs);
1263 return (SET_ERROR(EILSEQ));
1264 }
1265
1266 error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
1267 if ((error == 0) && (*ipp))
1268 zfs_inode_update(ITOZ(*ipp));
1269
1270 ZFS_EXIT(zfsvfs);
1271 return (error);
1272 }
1273
1274 /*
1275 * Attempt to create a new entry in a directory. If the entry
1276 * already exists, truncate the file if permissible, else return
1277 * an error. Return the ip of the created or trunc'd file.
1278 *
1279 * IN: dip - inode of directory to put new file entry in.
1280 * name - name of new file entry.
1281 * vap - attributes of new file.
1282 * excl - flag indicating exclusive or non-exclusive mode.
1283 * mode - mode to open file with.
1284 * cr - credentials of caller.
1285 * flag - large file flag [UNUSED].
1286 * vsecp - ACL to be set
1287 *
1288 * OUT: ipp - inode of created or trunc'd entry.
1289 *
1290 * RETURN: 0 on success, error code on failure.
1291 *
1292 * Timestamps:
1293 * dip - ctime|mtime updated if new entry created
1294 * ip - ctime|mtime always, atime if new
1295 */
1296
1297 /* ARGSUSED */
1298 int
1299 zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
1300 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1301 {
1302 znode_t *zp, *dzp = ITOZ(dip);
1303 zfsvfs_t *zfsvfs = ITOZSB(dip);
1304 zilog_t *zilog;
1305 objset_t *os;
1306 zfs_dirlock_t *dl;
1307 dmu_tx_t *tx;
1308 int error;
1309 uid_t uid;
1310 gid_t gid;
1311 zfs_acl_ids_t acl_ids;
1312 boolean_t fuid_dirtied;
1313 boolean_t have_acl = B_FALSE;
1314 boolean_t waited = B_FALSE;
1315
1316 /*
1317 * If we have an ephemeral id, ACL, or XVATTR then
1318 * make sure the file system is at the proper version
1319 */
1320
1321 gid = crgetgid(cr);
1322 uid = crgetuid(cr);
1323
1324 if (zfsvfs->z_use_fuids == B_FALSE &&
1325 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1326 return (SET_ERROR(EINVAL));
1327
1328 if (name == NULL)
1329 return (SET_ERROR(EINVAL));
1330
1331 ZFS_ENTER(zfsvfs);
1332 ZFS_VERIFY_ZP(dzp);
1333 os = zfsvfs->z_os;
1334 zilog = zfsvfs->z_log;
1335
1336 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1337 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1338 ZFS_EXIT(zfsvfs);
1339 return (SET_ERROR(EILSEQ));
1340 }
1341
1342 if (vap->va_mask & ATTR_XVATTR) {
1343 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1344 crgetuid(cr), cr, vap->va_mode)) != 0) {
1345 ZFS_EXIT(zfsvfs);
1346 return (error);
1347 }
1348 }
1349
1350 top:
1351 *ipp = NULL;
1352 if (*name == '\0') {
1353 /*
1354 * Null component name refers to the directory itself.
1355 */
1356 igrab(dip);
1357 zp = dzp;
1358 dl = NULL;
1359 error = 0;
1360 } else {
1361 /* possible igrab(zp) */
1362 int zflg = 0;
1363
1364 if (flag & FIGNORECASE)
1365 zflg |= ZCILOOK;
1366
1367 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1368 NULL, NULL);
1369 if (error) {
1370 if (have_acl)
1371 zfs_acl_ids_free(&acl_ids);
1372 if (strcmp(name, "..") == 0)
1373 error = SET_ERROR(EISDIR);
1374 ZFS_EXIT(zfsvfs);
1375 return (error);
1376 }
1377 }
1378
1379 if (zp == NULL) {
1380 uint64_t txtype;
1381
1382 /*
1383 * Create a new file object and update the directory
1384 * to reference it.
1385 */
1386 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1387 if (have_acl)
1388 zfs_acl_ids_free(&acl_ids);
1389 goto out;
1390 }
1391
1392 /*
1393 * We only support the creation of regular files in
1394 * extended attribute directories.
1395 */
1396
1397 if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
1398 if (have_acl)
1399 zfs_acl_ids_free(&acl_ids);
1400 error = SET_ERROR(EINVAL);
1401 goto out;
1402 }
1403
1404 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1405 cr, vsecp, &acl_ids)) != 0)
1406 goto out;
1407 have_acl = B_TRUE;
1408
1409 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1410 zfs_acl_ids_free(&acl_ids);
1411 error = SET_ERROR(EDQUOT);
1412 goto out;
1413 }
1414
1415 tx = dmu_tx_create(os);
1416
1417 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1418 ZFS_SA_BASE_ATTR_SIZE);
1419
1420 fuid_dirtied = zfsvfs->z_fuid_dirty;
1421 if (fuid_dirtied)
1422 zfs_fuid_txhold(zfsvfs, tx);
1423 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1424 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1425 if (!zfsvfs->z_use_sa &&
1426 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1427 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1428 0, acl_ids.z_aclp->z_acl_bytes);
1429 }
1430 error = dmu_tx_assign(tx,
1431 (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1432 if (error) {
1433 zfs_dirent_unlock(dl);
1434 if (error == ERESTART) {
1435 waited = B_TRUE;
1436 dmu_tx_wait(tx);
1437 dmu_tx_abort(tx);
1438 goto top;
1439 }
1440 zfs_acl_ids_free(&acl_ids);
1441 dmu_tx_abort(tx);
1442 ZFS_EXIT(zfsvfs);
1443 return (error);
1444 }
1445 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1446
1447 if (fuid_dirtied)
1448 zfs_fuid_sync(zfsvfs, tx);
1449
1450 (void) zfs_link_create(dl, zp, tx, ZNEW);
1451 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1452 if (flag & FIGNORECASE)
1453 txtype |= TX_CI;
1454 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1455 vsecp, acl_ids.z_fuidp, vap);
1456 zfs_acl_ids_free(&acl_ids);
1457 dmu_tx_commit(tx);
1458 } else {
1459 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1460
1461 if (have_acl)
1462 zfs_acl_ids_free(&acl_ids);
1463 have_acl = B_FALSE;
1464
1465 /*
1466 * A directory entry already exists for this name.
1467 */
1468 /*
1469 * Can't truncate an existing file if in exclusive mode.
1470 */
1471 if (excl) {
1472 error = SET_ERROR(EEXIST);
1473 goto out;
1474 }
1475 /*
1476 * Can't open a directory for writing.
1477 */
1478 if (S_ISDIR(ZTOI(zp)->i_mode)) {
1479 error = SET_ERROR(EISDIR);
1480 goto out;
1481 }
1482 /*
1483 * Verify requested access to file.
1484 */
1485 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1486 goto out;
1487 }
1488
1489 mutex_enter(&dzp->z_lock);
1490 dzp->z_seq++;
1491 mutex_exit(&dzp->z_lock);
1492
1493 /*
1494 * Truncate regular files if requested.
1495 */
1496 if (S_ISREG(ZTOI(zp)->i_mode) &&
1497 (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
1498 /* we can't hold any locks when calling zfs_freesp() */
1499 if (dl) {
1500 zfs_dirent_unlock(dl);
1501 dl = NULL;
1502 }
1503 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1504 }
1505 }
1506 out:
1507
1508 if (dl)
1509 zfs_dirent_unlock(dl);
1510
1511 if (error) {
1512 if (zp)
1513 iput(ZTOI(zp));
1514 } else {
1515 zfs_inode_update(dzp);
1516 zfs_inode_update(zp);
1517 *ipp = ZTOI(zp);
1518 }
1519
1520 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1521 zil_commit(zilog, 0);
1522
1523 ZFS_EXIT(zfsvfs);
1524 return (error);
1525 }
1526
1527 /* ARGSUSED */
1528 int
1529 zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
1530 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1531 {
1532 znode_t *zp = NULL, *dzp = ITOZ(dip);
1533 zfsvfs_t *zfsvfs = ITOZSB(dip);
1534 objset_t *os;
1535 dmu_tx_t *tx;
1536 int error;
1537 uid_t uid;
1538 gid_t gid;
1539 zfs_acl_ids_t acl_ids;
1540 boolean_t fuid_dirtied;
1541 boolean_t have_acl = B_FALSE;
1542 boolean_t waited = B_FALSE;
1543
1544 /*
1545 * If we have an ephemeral id, ACL, or XVATTR then
1546 * make sure the file system is at the proper version
1547 */
1548
1549 gid = crgetgid(cr);
1550 uid = crgetuid(cr);
1551
1552 if (zfsvfs->z_use_fuids == B_FALSE &&
1553 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1554 return (SET_ERROR(EINVAL));
1555
1556 ZFS_ENTER(zfsvfs);
1557 ZFS_VERIFY_ZP(dzp);
1558 os = zfsvfs->z_os;
1559
1560 if (vap->va_mask & ATTR_XVATTR) {
1561 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1562 crgetuid(cr), cr, vap->va_mode)) != 0) {
1563 ZFS_EXIT(zfsvfs);
1564 return (error);
1565 }
1566 }
1567
1568 top:
1569 *ipp = NULL;
1570
1571 /*
1572 * Create a new file object and update the directory
1573 * to reference it.
1574 */
1575 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1576 if (have_acl)
1577 zfs_acl_ids_free(&acl_ids);
1578 goto out;
1579 }
1580
1581 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1582 cr, vsecp, &acl_ids)) != 0)
1583 goto out;
1584 have_acl = B_TRUE;
1585
1586 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1587 zfs_acl_ids_free(&acl_ids);
1588 error = SET_ERROR(EDQUOT);
1589 goto out;
1590 }
1591
1592 tx = dmu_tx_create(os);
1593
1594 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1595 ZFS_SA_BASE_ATTR_SIZE);
1596 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1597
1598 fuid_dirtied = zfsvfs->z_fuid_dirty;
1599 if (fuid_dirtied)
1600 zfs_fuid_txhold(zfsvfs, tx);
1601 if (!zfsvfs->z_use_sa &&
1602 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1603 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1604 0, acl_ids.z_aclp->z_acl_bytes);
1605 }
1606 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1607 if (error) {
1608 if (error == ERESTART) {
1609 waited = B_TRUE;
1610 dmu_tx_wait(tx);
1611 dmu_tx_abort(tx);
1612 goto top;
1613 }
1614 zfs_acl_ids_free(&acl_ids);
1615 dmu_tx_abort(tx);
1616 ZFS_EXIT(zfsvfs);
1617 return (error);
1618 }
1619 zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
1620
1621 if (fuid_dirtied)
1622 zfs_fuid_sync(zfsvfs, tx);
1623
1624 /* Add to unlinked set */
1625 zp->z_unlinked = 1;
1626 zfs_unlinked_add(zp, tx);
1627 zfs_acl_ids_free(&acl_ids);
1628 dmu_tx_commit(tx);
1629 out:
1630
1631 if (error) {
1632 if (zp)
1633 iput(ZTOI(zp));
1634 } else {
1635 zfs_inode_update(dzp);
1636 zfs_inode_update(zp);
1637 *ipp = ZTOI(zp);
1638 }
1639
1640 ZFS_EXIT(zfsvfs);
1641 return (error);
1642 }
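
/*
 * A minimal sketch (hypothetical, not compiled): zfs_tmpfile() backs
 * the Linux O_TMPFILE open path. The new file starts life on the
 * unlinked set, so it is reclaimed on last close unless userspace
 * later gives it a name with linkat(2). The path below is made up.
 */
#if 0
#define	_GNU_SOURCE	/* O_TMPFILE */
#include <fcntl.h>

static int
make_anon_file(void)
{
	return (open("/tank/dir", O_TMPFILE | O_RDWR, 0600));
}
#endif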
1643
1644 /*
1645 * Remove an entry from a directory.
1646 *
1647 * IN: dip - inode of directory to remove entry from.
1648 * name - name of entry to remove.
1649 * cr - credentials of caller.
1650 *
1651 * RETURN: 0 on success,
1652 * error code on failure
1653 *
1654 * Timestamps:
1655 * dip - ctime|mtime
1656 * ip - ctime (if nlink > 0)
1657 */
1658
1659 uint64_t null_xattr = 0;
1660
1661 /*ARGSUSED*/
1662 int
1663 zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
1664 {
1665 znode_t *zp, *dzp = ITOZ(dip);
1666 znode_t *xzp;
1667 struct inode *ip;
1668 zfsvfs_t *zfsvfs = ITOZSB(dip);
1669 zilog_t *zilog;
1670 uint64_t acl_obj, xattr_obj;
1671 uint64_t xattr_obj_unlinked = 0;
1672 uint64_t obj = 0;
1673 uint64_t links;
1674 zfs_dirlock_t *dl;
1675 dmu_tx_t *tx;
1676 boolean_t may_delete_now, delete_now = FALSE;
1677 boolean_t unlinked, toobig = FALSE;
1678 uint64_t txtype;
1679 pathname_t *realnmp = NULL;
1680 pathname_t realnm;
1681 int error;
1682 int zflg = ZEXISTS;
1683 boolean_t waited = B_FALSE;
1684
1685 if (name == NULL)
1686 return (SET_ERROR(EINVAL));
1687
1688 ZFS_ENTER(zfsvfs);
1689 ZFS_VERIFY_ZP(dzp);
1690 zilog = zfsvfs->z_log;
1691
1692 if (flags & FIGNORECASE) {
1693 zflg |= ZCILOOK;
1694 pn_alloc(&realnm);
1695 realnmp = &realnm;
1696 }
1697
1698 top:
1699 xattr_obj = 0;
1700 xzp = NULL;
1701 /*
1702 * Attempt to lock directory; fail if entry doesn't exist.
1703 */
1704 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1705 NULL, realnmp))) {
1706 if (realnmp)
1707 pn_free(realnmp);
1708 ZFS_EXIT(zfsvfs);
1709 return (error);
1710 }
1711
1712 ip = ZTOI(zp);
1713
1714 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1715 goto out;
1716 }
1717
1718 /*
1719 * Need to use rmdir for removing directories.
1720 */
1721 if (S_ISDIR(ip->i_mode)) {
1722 error = SET_ERROR(EPERM);
1723 goto out;
1724 }
1725
1726 #ifdef HAVE_DNLC
1727 if (realnmp)
1728 dnlc_remove(dvp, realnmp->pn_buf);
1729 else
1730 dnlc_remove(dvp, name);
1731 #endif /* HAVE_DNLC */
1732
1733 mutex_enter(&zp->z_lock);
1734 may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
1735 mutex_exit(&zp->z_lock);
1736
1737 /*
1738 * We may delete the znode now, or we may put it in the unlinked set;
1739 * it depends on whether we're the last link, and on whether there are
1740 * other holds on the inode. So we dmu_tx_hold() the right things to
1741 * allow for either case.
1742 */
1743 obj = zp->z_id;
1744 tx = dmu_tx_create(zfsvfs->z_os);
1745 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1746 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1747 zfs_sa_upgrade_txholds(tx, zp);
1748 zfs_sa_upgrade_txholds(tx, dzp);
1749 if (may_delete_now) {
1750 toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
1751 /* if the file is too big, only hold_free a token amount */
1752 dmu_tx_hold_free(tx, zp->z_id, 0,
1753 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1754 }
1755
1756 /* are there any extended attributes? */
1757 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1758 &xattr_obj, sizeof (xattr_obj));
1759 if (error == 0 && xattr_obj) {
1760 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1761 ASSERT0(error);
1762 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1763 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1764 }
1765
1766 mutex_enter(&zp->z_lock);
1767 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1768 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1769 mutex_exit(&zp->z_lock);
1770
1771 /* charge as an update -- would be nice not to charge at all */
1772 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1773
1774 /*
1775 * Mark this transaction as typically resulting in a net free of space
1776 */
1777 dmu_tx_mark_netfree(tx);
1778
1779 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1780 if (error) {
1781 zfs_dirent_unlock(dl);
1782 if (error == ERESTART) {
1783 waited = B_TRUE;
1784 dmu_tx_wait(tx);
1785 dmu_tx_abort(tx);
1786 iput(ip);
1787 if (xzp)
1788 iput(ZTOI(xzp));
1789 goto top;
1790 }
1791 if (realnmp)
1792 pn_free(realnmp);
1793 dmu_tx_abort(tx);
1794 iput(ip);
1795 if (xzp)
1796 iput(ZTOI(xzp));
1797 ZFS_EXIT(zfsvfs);
1798 return (error);
1799 }
1800
1801 /*
1802 * Remove the directory entry.
1803 */
1804 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1805
1806 if (error) {
1807 dmu_tx_commit(tx);
1808 goto out;
1809 }
1810
1811 if (unlinked) {
1812 /*
1813 * Hold z_lock so that we can make sure that the ACL obj
1814 * hasn't changed. Could have been deleted due to
1815 * zfs_sa_upgrade().
1816 */
1817 mutex_enter(&zp->z_lock);
1818 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1819 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1820 delete_now = may_delete_now && !toobig &&
1821 atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
1822 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1823 acl_obj;
1824 }
1825
1826 if (delete_now) {
1827 if (xattr_obj_unlinked) {
1828 ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
1829 mutex_enter(&xzp->z_lock);
1830 xzp->z_unlinked = 1;
1831 clear_nlink(ZTOI(xzp));
1832 links = 0;
1833 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1834 &links, sizeof (links), tx);
1835 ASSERT3U(error, ==, 0);
1836 mutex_exit(&xzp->z_lock);
1837 zfs_unlinked_add(xzp, tx);
1838
1839 if (zp->z_is_sa)
1840 error = sa_remove(zp->z_sa_hdl,
1841 SA_ZPL_XATTR(zfsvfs), tx);
1842 else
1843 error = sa_update(zp->z_sa_hdl,
1844 SA_ZPL_XATTR(zfsvfs), &null_xattr,
1845 sizeof (uint64_t), tx);
1846 ASSERT0(error);
1847 }
1848 /*
1849 * Add to the unlinked set because a new reference could be
1850 * taken concurrently resulting in a deferred destruction.
1851 */
1852 zfs_unlinked_add(zp, tx);
1853 mutex_exit(&zp->z_lock);
1854 } else if (unlinked) {
1855 mutex_exit(&zp->z_lock);
1856 zfs_unlinked_add(zp, tx);
1857 }
1858
1859 txtype = TX_REMOVE;
1860 if (flags & FIGNORECASE)
1861 txtype |= TX_CI;
1862 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1863
1864 dmu_tx_commit(tx);
1865 out:
1866 if (realnmp)
1867 pn_free(realnmp);
1868
1869 zfs_dirent_unlock(dl);
1870 zfs_inode_update(dzp);
1871 zfs_inode_update(zp);
1872
1873 if (delete_now)
1874 iput(ip);
1875 else
1876 zfs_iput_async(ip);
1877
1878 if (xzp) {
1879 zfs_inode_update(xzp);
1880 zfs_iput_async(ZTOI(xzp));
1881 }
1882
1883 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1884 zil_commit(zilog, 0);
1885
1886 ZFS_EXIT(zfsvfs);
1887 return (error);
1888 }
1889
1890 /*
1891 * Create a new directory and insert it into dip using the name
1892 * provided. Return a pointer to the inserted directory.
1893 *
1894 * IN: dip - inode of directory to add subdir to.
1895 * dirname - name of new directory.
1896 * vap - attributes of new directory.
1897 * cr - credentials of caller.
1898 * vsecp - ACL to be set
1899 *
1900 * OUT: ipp - inode of created directory.
1901 *
1902 * RETURN: 0 on success,
1903 * error code on failure
1904 *
1905 * Timestamps:
1906 * dip - ctime|mtime updated
1907 * ipp - ctime|mtime|atime updated
1908 */
1909 /*ARGSUSED*/
1910 int
1911 zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1912 cred_t *cr, int flags, vsecattr_t *vsecp)
1913 {
1914 znode_t *zp, *dzp = ITOZ(dip);
1915 zfsvfs_t *zfsvfs = ITOZSB(dip);
1916 zilog_t *zilog;
1917 zfs_dirlock_t *dl;
1918 uint64_t txtype;
1919 dmu_tx_t *tx;
1920 int error;
1921 int zf = ZNEW;
1922 uid_t uid;
1923 gid_t gid = crgetgid(cr);
1924 zfs_acl_ids_t acl_ids;
1925 boolean_t fuid_dirtied;
1926 boolean_t waited = B_FALSE;
1927
1928 ASSERT(S_ISDIR(vap->va_mode));
1929
1930 /*
1931 * If we have an ephemeral id, ACL, or XVATTR then
1932 * make sure the file system is at the proper version
1933 */
1934
1935 uid = crgetuid(cr);
1936 if (zfsvfs->z_use_fuids == B_FALSE &&
1937 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1938 return (SET_ERROR(EINVAL));
1939
1940 if (dirname == NULL)
1941 return (SET_ERROR(EINVAL));
1942
1943 ZFS_ENTER(zfsvfs);
1944 ZFS_VERIFY_ZP(dzp);
1945 zilog = zfsvfs->z_log;
1946
1947 if (dzp->z_pflags & ZFS_XATTR) {
1948 ZFS_EXIT(zfsvfs);
1949 return (SET_ERROR(EINVAL));
1950 }
1951
1952 if (zfsvfs->z_utf8 && u8_validate(dirname,
1953 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1954 ZFS_EXIT(zfsvfs);
1955 return (SET_ERROR(EILSEQ));
1956 }
1957 if (flags & FIGNORECASE)
1958 zf |= ZCILOOK;
1959
1960 if (vap->va_mask & ATTR_XVATTR) {
1961 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1962 crgetuid(cr), cr, vap->va_mode)) != 0) {
1963 ZFS_EXIT(zfsvfs);
1964 return (error);
1965 }
1966 }
1967
1968 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1969 vsecp, &acl_ids)) != 0) {
1970 ZFS_EXIT(zfsvfs);
1971 return (error);
1972 }
1973 /*
1974 * First make sure the new directory doesn't exist.
1975 *
1976 * Existence is checked first to make sure we don't return
1977 * EACCES instead of EEXIST, which can cause some applications
1978 * to fail.
1979 */
1980 top:
1981 *ipp = NULL;
1982
1983 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1984 NULL, NULL))) {
1985 zfs_acl_ids_free(&acl_ids);
1986 ZFS_EXIT(zfsvfs);
1987 return (error);
1988 }
1989
1990 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
1991 zfs_acl_ids_free(&acl_ids);
1992 zfs_dirent_unlock(dl);
1993 ZFS_EXIT(zfsvfs);
1994 return (error);
1995 }
1996
1997 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1998 zfs_acl_ids_free(&acl_ids);
1999 zfs_dirent_unlock(dl);
2000 ZFS_EXIT(zfsvfs);
2001 return (SET_ERROR(EDQUOT));
2002 }
2003
2004 /*
2005 * Add a new entry to the directory.
2006 */
2007 tx = dmu_tx_create(zfsvfs->z_os);
2008 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2009 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2010 fuid_dirtied = zfsvfs->z_fuid_dirty;
2011 if (fuid_dirtied)
2012 zfs_fuid_txhold(zfsvfs, tx);
2013 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2014 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2015 acl_ids.z_aclp->z_acl_bytes);
2016 }
2017
2018 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2019 ZFS_SA_BASE_ATTR_SIZE);
2020
2021 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2022 if (error) {
2023 zfs_dirent_unlock(dl);
2024 if (error == ERESTART) {
2025 waited = B_TRUE;
2026 dmu_tx_wait(tx);
2027 dmu_tx_abort(tx);
2028 goto top;
2029 }
2030 zfs_acl_ids_free(&acl_ids);
2031 dmu_tx_abort(tx);
2032 ZFS_EXIT(zfsvfs);
2033 return (error);
2034 }
2035
2036 /*
2037 * Create new node.
2038 */
2039 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2040
2041 if (fuid_dirtied)
2042 zfs_fuid_sync(zfsvfs, tx);
2043
2044 /*
2045 * Now put new name in parent dir.
2046 */
2047 (void) zfs_link_create(dl, zp, tx, ZNEW);
2048
2049 *ipp = ZTOI(zp);
2050
2051 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2052 if (flags & FIGNORECASE)
2053 txtype |= TX_CI;
2054 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2055 acl_ids.z_fuidp, vap);
2056
2057 zfs_acl_ids_free(&acl_ids);
2058
2059 dmu_tx_commit(tx);
2060
2061 zfs_dirent_unlock(dl);
2062
2063 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2064 zil_commit(zilog, 0);
2065
2066 zfs_inode_update(dzp);
2067 zfs_inode_update(zp);
2068 ZFS_EXIT(zfsvfs);
2069 return (0);
2070 }
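/*
 * Editor's note: the dmu_tx_assign() handling in zfs_mkdir() above is an
 * instance of the retry idiom used throughout this file.  A minimal sketch
 * of the pattern (illustrative only; "os" and the tx holds are placeholders
 * for whatever the operation needs):
 *
 *	boolean_t waited = B_FALSE;
 * top:
 *	tx = dmu_tx_create(os);
 *	(declare tx holds: dmu_tx_hold_zap(), dmu_tx_hold_sa(), ...)
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		(drop any locks taken since "top")
 *		waited = B_TRUE;
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	(do the work)
 *	dmu_tx_commit(tx);
 *
 * Passing TXG_NOTHROTTLE on the retry ensures a thread that already slept
 * in dmu_tx_wait() is not delayed by the write throttle a second time.
 */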
2071
2072 /*
2073 * Remove a directory subdir entry. If the current working
2074 * directory is the same as the subdir to be removed, the
2075 * remove will fail.
2076 *
2077 * IN: dip - inode of directory to remove from.
2078 * name - name of directory to be removed.
2079 * cwd - inode of current working directory.
2080 * cr - credentials of caller.
2081 * flags - case flags
2082 *
2083 * RETURN: 0 on success, error code on failure.
2084 *
2085 * Timestamps:
2086 * dip - ctime|mtime updated
2087 */
2088 /*ARGSUSED*/
2089 int
2090 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2091 int flags)
2092 {
2093 znode_t *dzp = ITOZ(dip);
2094 znode_t *zp;
2095 struct inode *ip;
2096 zfsvfs_t *zfsvfs = ITOZSB(dip);
2097 zilog_t *zilog;
2098 zfs_dirlock_t *dl;
2099 dmu_tx_t *tx;
2100 int error;
2101 int zflg = ZEXISTS;
2102 boolean_t waited = B_FALSE;
2103
2104 if (name == NULL)
2105 return (SET_ERROR(EINVAL));
2106
2107 ZFS_ENTER(zfsvfs);
2108 ZFS_VERIFY_ZP(dzp);
2109 zilog = zfsvfs->z_log;
2110
2111 if (flags & FIGNORECASE)
2112 zflg |= ZCILOOK;
2113 top:
2114 zp = NULL;
2115
2116 /*
2117 * Attempt to lock directory; fail if entry doesn't exist.
2118 */
2119 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2120 NULL, NULL))) {
2121 ZFS_EXIT(zfsvfs);
2122 return (error);
2123 }
2124
2125 ip = ZTOI(zp);
2126
2127 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
2128 goto out;
2129 }
2130
2131 if (!S_ISDIR(ip->i_mode)) {
2132 error = SET_ERROR(ENOTDIR);
2133 goto out;
2134 }
2135
2136 if (ip == cwd) {
2137 error = SET_ERROR(EINVAL);
2138 goto out;
2139 }
2140
2141 /*
2142 * Grab a lock on the directory to make sure that no one is
2143 * trying to add (or look up) entries while we are removing it.
2144 */
2145 rw_enter(&zp->z_name_lock, RW_WRITER);
2146
2147 /*
2148 * Grab a lock on the parent pointer to make sure we play well
2149 * with the treewalk and directory rename code.
2150 */
2151 rw_enter(&zp->z_parent_lock, RW_WRITER);
2152
2153 tx = dmu_tx_create(zfsvfs->z_os);
2154 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2155 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2156 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2157 zfs_sa_upgrade_txholds(tx, zp);
2158 zfs_sa_upgrade_txholds(tx, dzp);
2159 dmu_tx_mark_netfree(tx);
2160 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2161 if (error) {
2162 rw_exit(&zp->z_parent_lock);
2163 rw_exit(&zp->z_name_lock);
2164 zfs_dirent_unlock(dl);
2165 if (error == ERESTART) {
2166 waited = B_TRUE;
2167 dmu_tx_wait(tx);
2168 dmu_tx_abort(tx);
2169 iput(ip);
2170 goto top;
2171 }
2172 dmu_tx_abort(tx);
2173 iput(ip);
2174 ZFS_EXIT(zfsvfs);
2175 return (error);
2176 }
2177
2178 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2179
2180 if (error == 0) {
2181 uint64_t txtype = TX_RMDIR;
2182 if (flags & FIGNORECASE)
2183 txtype |= TX_CI;
2184 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2185 }
2186
2187 dmu_tx_commit(tx);
2188
2189 rw_exit(&zp->z_parent_lock);
2190 rw_exit(&zp->z_name_lock);
2191 out:
2192 zfs_dirent_unlock(dl);
2193
2194 zfs_inode_update(dzp);
2195 zfs_inode_update(zp);
2196 iput(ip);
2197
2198 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2199 zil_commit(zilog, 0);
2200
2201 ZFS_EXIT(zfsvfs);
2202 return (error);
2203 }
2204
2205 /*
2206 * Read as many directory entries as will fit into the provided
2207 * dirent buffer from the given directory cursor position.
2208 *
2209 * IN: ip - inode of directory to read.
2210 * dirent - buffer for directory entries.
2211 *
2212 * OUT: dirent - buffer filled with directory entries.
2213 *
2214 * RETURN: 0 if success
2215 * error code if failure
2216 *
2217 * Timestamps:
2218 * ip - atime updated
2219 *
2220 * Note that the low 4 bits of the cookie returned by zap are always zero.
2221 * This allows us to use the low range for "special" directory entries:
2222 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2223 * we use the offset 2 for the '.zfs' directory.
2224 */
2225 /* ARGSUSED */
2226 int
2227 zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
2228 {
2229 znode_t *zp = ITOZ(ip);
2230 zfsvfs_t *zfsvfs = ITOZSB(ip);
2231 objset_t *os;
2232 zap_cursor_t zc;
2233 zap_attribute_t zap;
2234 int error;
2235 uint8_t prefetch;
2236 uint8_t type;
2237 int done = 0;
2238 uint64_t parent;
2239 uint64_t offset; /* must be unsigned; checks for < 1 */
2240
2241 ZFS_ENTER(zfsvfs);
2242 ZFS_VERIFY_ZP(zp);
2243
2244 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2245 &parent, sizeof (parent))) != 0)
2246 goto out;
2247
2248 /*
2249 * Quit if directory has been removed (POSIX)
2250 */
2251 if (zp->z_unlinked)
2252 goto out;
2253
2254 error = 0;
2255 os = zfsvfs->z_os;
2256 offset = ctx->pos;
2257 prefetch = zp->z_zn_prefetch;
2258
2259 /*
2260 * Initialize the iterator cursor.
2261 */
2262 if (offset <= 3) {
2263 /*
2264 * Start iteration from the beginning of the directory.
2265 */
2266 zap_cursor_init(&zc, os, zp->z_id);
2267 } else {
2268 /*
2269 * The offset is a serialized cursor.
2270 */
2271 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2272 }
2273
2274 /*
2275 * Transform to file-system independent format
2276 */
2277 while (!done) {
2278 uint64_t objnum;
2279 /*
2280 * Special case `.', `..', and `.zfs'.
2281 */
2282 if (offset == 0) {
2283 (void) strcpy(zap.za_name, ".");
2284 zap.za_normalization_conflict = 0;
2285 objnum = zp->z_id;
2286 type = DT_DIR;
2287 } else if (offset == 1) {
2288 (void) strcpy(zap.za_name, "..");
2289 zap.za_normalization_conflict = 0;
2290 objnum = parent;
2291 type = DT_DIR;
2292 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2293 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2294 zap.za_normalization_conflict = 0;
2295 objnum = ZFSCTL_INO_ROOT;
2296 type = DT_DIR;
2297 } else {
2298 /*
2299 * Grab next entry.
2300 */
2301 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2302 if (error == ENOENT)
2303 break;
2304 else
2305 goto update;
2306 }
2307
2308 /*
2309 * Allow multiple entries provided the first entry is
2310 * the object id. Non-zpl consumers may safely make
2311 * use of the additional space.
2312 *
2313 * XXX: This should be a feature flag for compatibility
2314 */
2315 if (zap.za_integer_length != 8 ||
2316 zap.za_num_integers == 0) {
2317 cmn_err(CE_WARN, "zap_readdir: bad directory "
2318 "entry, obj = %lld, offset = %lld, "
2319 "length = %d, num = %lld\n",
2320 (u_longlong_t)zp->z_id,
2321 (u_longlong_t)offset,
2322 zap.za_integer_length,
2323 (u_longlong_t)zap.za_num_integers);
2324 error = SET_ERROR(ENXIO);
2325 goto update;
2326 }
2327
2328 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2329 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2330 }
2331
2332 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
2333 objnum, type);
2334 if (done)
2335 break;
2336
2337 /* Prefetch znode */
2338 if (prefetch) {
2339 dmu_prefetch(os, objnum, 0, 0, 0,
2340 ZIO_PRIORITY_SYNC_READ);
2341 }
2342
2343 /*
2344 * Move to the next entry, fill in the previous offset.
2345 */
2346 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2347 zap_cursor_advance(&zc);
2348 offset = zap_cursor_serialize(&zc);
2349 } else {
2350 offset += 1;
2351 }
2352 ctx->pos = offset;
2353 }
2354 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2355
2356 update:
2357 zap_cursor_fini(&zc);
2358 if (error == ENOENT)
2359 error = 0;
2360 out:
2361 ZFS_EXIT(zfsvfs);
2362
2363 return (error);
2364 }
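/*
 * Editor's note: a sketch of the directory offset space that zfs_readdir()
 * above presents to callers (illustrative only).  Offsets 0 and 1 are the
 * synthetic "." and ".." entries, offset 2 is ".zfs" when the control
 * directory is visible, and every larger offset is a serialized ZAP cursor.
 * Because the low 4 bits of a serialized cursor are always zero, the
 * special offsets can never collide with a real entry:
 *
 *	offset 0  ->  "."     (objnum = zp->z_id)
 *	offset 1  ->  ".."    (objnum = parent)
 *	offset 2  ->  ".zfs"  (root directory only, if visible)
 *	else      ->  zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
 *	              zap_cursor_retrieve(&zc, &zap);
 */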
2365
2366 ulong_t zfs_fsync_sync_cnt = 4;
2367
2368 int
2369 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2370 {
2371 znode_t *zp = ITOZ(ip);
2372 zfsvfs_t *zfsvfs = ITOZSB(ip);
2373
2374 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2375
2376 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2377 ZFS_ENTER(zfsvfs);
2378 ZFS_VERIFY_ZP(zp);
2379 zil_commit(zfsvfs->z_log, zp->z_id);
2380 ZFS_EXIT(zfsvfs);
2381 }
2382 tsd_set(zfs_fsyncer_key, NULL);
2383
2384 return (0);
2385 }
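/*
 * Editor's note: the second argument to zil_commit() selects how much of
 * the intent log must reach stable storage; zfs_fsync() above passes the
 * object id so only records touching this file (and their dependencies)
 * are flushed, while the sync=always paths elsewhere in this file pass 0
 * to flush records for all objects (a brief gloss of the calls as used
 * here):
 *
 *	zil_commit(zfsvfs->z_log, zp->z_id);	single object (fsync)
 *	zil_commit(zilog, 0);			whole objset (sync=always)
 */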
2386
2387
2388 /*
2389 * Get the requested file attributes and place them in the provided
2390 * vattr structure.
2391 *
2392 * IN: ip - inode of file.
2393 * vap - va_mask identifies requested attributes.
2394 * If ATTR_XVATTR set, then optional attrs are requested
2395 * flags - ATTR_NOACLCHECK (CIFS server context)
2396 * cr - credentials of caller.
2397 *
2398 * OUT: vap - attribute values.
2399 *
2400 * RETURN: 0 (always succeeds)
2401 */
2402 /* ARGSUSED */
2403 int
2404 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2405 {
2406 znode_t *zp = ITOZ(ip);
2407 zfsvfs_t *zfsvfs = ITOZSB(ip);
2408 int error = 0;
2409 uint64_t links;
2410 uint64_t atime[2], mtime[2], ctime[2];
2411 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2412 xoptattr_t *xoap = NULL;
2413 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2414 sa_bulk_attr_t bulk[3];
2415 int count = 0;
2416
2417 ZFS_ENTER(zfsvfs);
2418 ZFS_VERIFY_ZP(zp);
2419
2420 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2421
2422 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2423 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2424 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2425
2426 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2427 ZFS_EXIT(zfsvfs);
2428 return (error);
2429 }
2430
2431 /*
2432 * If the ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2433 * Also, if we are the owner don't bother, since the owner should
2434 * always be allowed to read the basic attributes of the file.
2435 */
2436 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2437 (vap->va_uid != crgetuid(cr))) {
2438 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2439 skipaclchk, cr))) {
2440 ZFS_EXIT(zfsvfs);
2441 return (error);
2442 }
2443 }
2444
2445 /*
2446 * Return all attributes. It's cheaper to provide the answer
2447 * than to determine whether we were asked the question.
2448 */
2449
2450 mutex_enter(&zp->z_lock);
2451 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2452 vap->va_mode = zp->z_mode;
2453 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2454 vap->va_nodeid = zp->z_id;
2455 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2456 links = ZTOI(zp)->i_nlink + 1;
2457 else
2458 links = ZTOI(zp)->i_nlink;
2459 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2460 vap->va_size = i_size_read(ip);
2461 vap->va_rdev = ip->i_rdev;
2462 vap->va_seq = ip->i_generation;
2463
2464 /*
2465 * Add in any requested optional attributes and the create time.
2466 * Also set the corresponding bits in the returned attribute bitmap.
2467 */
2468 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2469 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2470 xoap->xoa_archive =
2471 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2472 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2473 }
2474
2475 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2476 xoap->xoa_readonly =
2477 ((zp->z_pflags & ZFS_READONLY) != 0);
2478 XVA_SET_RTN(xvap, XAT_READONLY);
2479 }
2480
2481 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2482 xoap->xoa_system =
2483 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2484 XVA_SET_RTN(xvap, XAT_SYSTEM);
2485 }
2486
2487 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2488 xoap->xoa_hidden =
2489 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2490 XVA_SET_RTN(xvap, XAT_HIDDEN);
2491 }
2492
2493 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2494 xoap->xoa_nounlink =
2495 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2496 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2497 }
2498
2499 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2500 xoap->xoa_immutable =
2501 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2502 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2503 }
2504
2505 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2506 xoap->xoa_appendonly =
2507 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2508 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2509 }
2510
2511 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2512 xoap->xoa_nodump =
2513 ((zp->z_pflags & ZFS_NODUMP) != 0);
2514 XVA_SET_RTN(xvap, XAT_NODUMP);
2515 }
2516
2517 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2518 xoap->xoa_opaque =
2519 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2520 XVA_SET_RTN(xvap, XAT_OPAQUE);
2521 }
2522
2523 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2524 xoap->xoa_av_quarantined =
2525 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2526 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2527 }
2528
2529 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2530 xoap->xoa_av_modified =
2531 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2532 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2533 }
2534
2535 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2536 S_ISREG(ip->i_mode)) {
2537 zfs_sa_get_scanstamp(zp, xvap);
2538 }
2539
2540 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2541 uint64_t times[2];
2542
2543 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2544 times, sizeof (times));
2545 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2546 XVA_SET_RTN(xvap, XAT_CREATETIME);
2547 }
2548
2549 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2550 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2551 XVA_SET_RTN(xvap, XAT_REPARSE);
2552 }
2553 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2554 xoap->xoa_generation = ip->i_generation;
2555 XVA_SET_RTN(xvap, XAT_GEN);
2556 }
2557
2558 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2559 xoap->xoa_offline =
2560 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2561 XVA_SET_RTN(xvap, XAT_OFFLINE);
2562 }
2563
2564 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2565 xoap->xoa_sparse =
2566 ((zp->z_pflags & ZFS_SPARSE) != 0);
2567 XVA_SET_RTN(xvap, XAT_SPARSE);
2568 }
2569 }
2570
2571 ZFS_TIME_DECODE(&vap->va_atime, atime);
2572 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2573 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2574
2575 mutex_exit(&zp->z_lock);
2576
2577 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2578
2579 if (zp->z_blksz == 0) {
2580 /*
2581 * Block size hasn't been set; suggest maximal I/O transfers.
2582 */
2583 vap->va_blksize = zfsvfs->z_max_blksz;
2584 }
2585
2586 ZFS_EXIT(zfsvfs);
2587 return (0);
2588 }
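/*
 * Editor's note: a minimal sketch of the xvattr request/return protocol
 * implemented by zfs_getattr() above (illustrative only).  A caller
 * requests optional attributes with XVA_SET_REQ() and the implementation
 * acknowledges each answered attribute with XVA_SET_RTN():
 *
 *	xvattr_t xva;
 *	xoptattr_t *xoap;
 *
 *	xva_init(&xva);				sets ATTR_XVATTR in va_mask
 *	XVA_SET_REQ(&xva, XAT_IMMUTABLE);
 *	error = zfs_getattr(ip, &xva.xva_vattr, 0, cr);
 *	xoap = xva_getxoptattr(&xva);
 *	if (error == 0 && xoap != NULL &&
 *	    XVA_ISSET_RTN(&xva, XAT_IMMUTABLE))
 *		(xoap->xoa_immutable now holds the answer)
 */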
2589
2590 /*
2591 * Get the basic file attributes and place them in the provided kstat
2592 * structure. The inode is assumed to be the authoritative source
2593 * for most of the attributes. However, the znode currently has the
2594 * authoritative atime, blksize, and block count.
2595 *
2596 * IN: ip - inode of file.
2597 *
2598 * OUT: sp - kstat values.
2599 *
2600 * RETURN: 0 (always succeeds)
2601 */
2602 /* ARGSUSED */
2603 int
2604 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2605 {
2606 znode_t *zp = ITOZ(ip);
2607 zfsvfs_t *zfsvfs = ITOZSB(ip);
2608 uint32_t blksize;
2609 u_longlong_t nblocks;
2610
2611 ZFS_ENTER(zfsvfs);
2612 ZFS_VERIFY_ZP(zp);
2613
2614 mutex_enter(&zp->z_lock);
2615
2616 generic_fillattr(ip, sp);
2617
2618 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2619 sp->blksize = blksize;
2620 sp->blocks = nblocks;
2621
2622 if (unlikely(zp->z_blksz == 0)) {
2623 /*
2624 * Block size hasn't been set; suggest maximal I/O transfers.
2625 */
2626 sp->blksize = zfsvfs->z_max_blksz;
2627 }
2628
2629 mutex_exit(&zp->z_lock);
2630
2631 /*
2632 * Required to prevent NFS client from detecting different inode
2633 * numbers of snapshot root dentry before and after snapshot mount.
2634 */
2635 if (zfsvfs->z_issnap) {
2636 if (ip->i_sb->s_root->d_inode == ip)
2637 sp->ino = ZFSCTL_INO_SNAPDIRS -
2638 dmu_objset_id(zfsvfs->z_os);
2639 }
2640
2641 ZFS_EXIT(zfsvfs);
2642
2643 return (0);
2644 }
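/*
 * Editor's note: the snapshot special case above re-derives a stable inode
 * number for the snapshot root from the objset id, matching the number the
 * .zfs/snapshot directory code assigns before the snapshot is mounted, so
 * an NFS client sees the same ino on both sides of the automount (a sketch
 * of the relation, based on the code above):
 *
 *	sp->ino = ZFSCTL_INO_SNAPDIRS - dmu_objset_id(zfsvfs->z_os);
 */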
2645
2646 /*
2647 * Set the file attributes to the values contained in the
2648 * vattr structure.
2649 *
2650 * IN: ip - inode of file to be modified.
2651 * vap - new attribute values.
2652 * If ATTR_XVATTR set, then optional attrs are being set
2653 * flags - ATTR_UTIME set if non-default time values provided.
2654 * - ATTR_NOACLCHECK (CIFS context only).
2655 * cr - credentials of caller.
2656 *
2657 * RETURN: 0 if success
2658 * error code if failure
2659 *
2660 * Timestamps:
2661 * ip - ctime updated, mtime updated if size changed.
2662 */
2663 /* ARGSUSED */
2664 int
2665 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2666 {
2667 znode_t *zp = ITOZ(ip);
2668 zfsvfs_t *zfsvfs = ITOZSB(ip);
2669 zilog_t *zilog;
2670 dmu_tx_t *tx;
2671 vattr_t oldva;
2672 xvattr_t *tmpxvattr;
2673 uint_t mask = vap->va_mask;
2674 uint_t saved_mask = 0;
2675 int trim_mask = 0;
2676 uint64_t new_mode;
2677 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
2678 uint64_t xattr_obj;
2679 uint64_t mtime[2], ctime[2], atime[2];
2680 znode_t *attrzp;
2681 int need_policy = FALSE;
2682 int err, err2;
2683 zfs_fuid_info_t *fuidp = NULL;
2684 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2685 xoptattr_t *xoap;
2686 zfs_acl_t *aclp;
2687 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2688 boolean_t fuid_dirtied = B_FALSE;
2689 sa_bulk_attr_t *bulk, *xattr_bulk;
2690 int count = 0, xattr_count = 0;
2691
2692 if (mask == 0)
2693 return (0);
2694
2695 ZFS_ENTER(zfsvfs);
2696 ZFS_VERIFY_ZP(zp);
2697
2698 zilog = zfsvfs->z_log;
2699
2700 /*
2701 * Make sure that if an ephemeral uid/gid or xvattr is specified,
2702 * the file system is at the proper version level.
2703 */
2704
2705 if (zfsvfs->z_use_fuids == B_FALSE &&
2706 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2707 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2708 (mask & ATTR_XVATTR))) {
2709 ZFS_EXIT(zfsvfs);
2710 return (SET_ERROR(EINVAL));
2711 }
2712
2713 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2714 ZFS_EXIT(zfsvfs);
2715 return (SET_ERROR(EISDIR));
2716 }
2717
2718 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2719 ZFS_EXIT(zfsvfs);
2720 return (SET_ERROR(EINVAL));
2721 }
2722
2723 /*
2724 * If this is an xvattr_t, then get a pointer to the structure of
2725 * optional attributes. If this is NULL, then we have a vattr_t.
2726 */
2727 xoap = xva_getxoptattr(xvap);
2728
2729 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2730 xva_init(tmpxvattr);
2731
2732 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2733 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2734
2735 /*
2736 * Immutable files may only have their immutable bit and atime altered.
2737 */
2738 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2739 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2740 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2741 err = EPERM;
2742 goto out3;
2743 }
2744
2745 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2746 err = EPERM;
2747 goto out3;
2748 }
2749
2750 /*
2751 * Verify the timestamps don't overflow 32 bits.
2752 * ZFS can handle large timestamps, but 32-bit syscalls can't
2753 * handle times beyond 2038. This check should be removed
2754 * once large timestamps are fully supported.
2755 */
2756 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2757 if (((mask & ATTR_ATIME) &&
2758 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2759 ((mask & ATTR_MTIME) &&
2760 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2761 err = EOVERFLOW;
2762 goto out3;
2763 }
2764 }
2765
2766 top:
2767 attrzp = NULL;
2768 aclp = NULL;
2769
2770 /* Can this be moved to before the top label? */
2771 if (zfs_is_readonly(zfsvfs)) {
2772 err = EROFS;
2773 goto out3;
2774 }
2775
2776 /*
2777 * First validate permissions
2778 */
2779
2780 if (mask & ATTR_SIZE) {
2781 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2782 if (err)
2783 goto out3;
2784
2785 /*
2786 * XXX - Note, we are not providing any open
2787 * mode flags here (like FNDELAY), so we may
2788 * block if there are locks present... this
2789 * should be addressed in openat().
2790 */
2791 /* XXX - would it be OK to generate a log record here? */
2792 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2793 if (err)
2794 goto out3;
2795 }
2796
2797 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2798 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2799 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2800 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2801 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2802 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2803 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2804 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2805 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2806 skipaclchk, cr);
2807 }
2808
2809 if (mask & (ATTR_UID|ATTR_GID)) {
2810 int idmask = (mask & (ATTR_UID|ATTR_GID));
2811 int take_owner;
2812 int take_group;
2813
2814 /*
2815 * NOTE: even if a new mode is being set,
2816 * we may clear S_ISUID/S_ISGID bits.
2817 */
2818
2819 if (!(mask & ATTR_MODE))
2820 vap->va_mode = zp->z_mode;
2821
2822 /*
2823 * Take ownership or chgrp to group we are a member of
2824 */
2825
2826 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
2827 take_group = (mask & ATTR_GID) &&
2828 zfs_groupmember(zfsvfs, vap->va_gid, cr);
2829
2830 /*
2831 * If both ATTR_UID and ATTR_GID are set then take_owner and
2832 * take_group must both be set in order to allow taking
2833 * ownership.
2834 *
2835 * Otherwise, send the check through secpolicy_vnode_setattr()
2836 *
2837 */
2838
2839 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2840 take_owner && take_group) ||
2841 ((idmask == ATTR_UID) && take_owner) ||
2842 ((idmask == ATTR_GID) && take_group)) {
2843 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2844 skipaclchk, cr) == 0) {
2845 /*
2846 * Remove setuid/setgid for non-privileged users
2847 */
2848 (void) secpolicy_setid_clear(vap, cr);
2849 trim_mask = (mask & (ATTR_UID|ATTR_GID));
2850 } else {
2851 need_policy = TRUE;
2852 }
2853 } else {
2854 need_policy = TRUE;
2855 }
2856 }
2857
2858 mutex_enter(&zp->z_lock);
2859 oldva.va_mode = zp->z_mode;
2860 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2861 if (mask & ATTR_XVATTR) {
2862 /*
2863 * Update xvattr mask to include only those attributes
2864 * that are actually changing.
2865 *
2866 * the bits will be restored prior to actually setting
2867 * the attributes so the caller thinks they were set.
2868 */
2869 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2870 if (xoap->xoa_appendonly !=
2871 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2872 need_policy = TRUE;
2873 } else {
2874 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2875 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2876 }
2877 }
2878
2879 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2880 if (xoap->xoa_nounlink !=
2881 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2882 need_policy = TRUE;
2883 } else {
2884 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2885 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2886 }
2887 }
2888
2889 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2890 if (xoap->xoa_immutable !=
2891 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2892 need_policy = TRUE;
2893 } else {
2894 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2895 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2896 }
2897 }
2898
2899 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2900 if (xoap->xoa_nodump !=
2901 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2902 need_policy = TRUE;
2903 } else {
2904 XVA_CLR_REQ(xvap, XAT_NODUMP);
2905 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2906 }
2907 }
2908
2909 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2910 if (xoap->xoa_av_modified !=
2911 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2912 need_policy = TRUE;
2913 } else {
2914 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2915 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2916 }
2917 }
2918
2919 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2920 if ((!S_ISREG(ip->i_mode) &&
2921 xoap->xoa_av_quarantined) ||
2922 xoap->xoa_av_quarantined !=
2923 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2924 need_policy = TRUE;
2925 } else {
2926 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2927 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2928 }
2929 }
2930
2931 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2932 mutex_exit(&zp->z_lock);
2933 err = EPERM;
2934 goto out3;
2935 }
2936
2937 if (need_policy == FALSE &&
2938 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2939 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2940 need_policy = TRUE;
2941 }
2942 }
2943
2944 mutex_exit(&zp->z_lock);
2945
2946 if (mask & ATTR_MODE) {
2947 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2948 err = secpolicy_setid_setsticky_clear(ip, vap,
2949 &oldva, cr);
2950 if (err)
2951 goto out3;
2952
2953 trim_mask |= ATTR_MODE;
2954 } else {
2955 need_policy = TRUE;
2956 }
2957 }
2958
2959 if (need_policy) {
2960 /*
2961 * If trim_mask is set then take ownership
2962 * has been granted or write_acl is present and user
2963 * has the ability to modify mode. In that case remove
2964 * UID|GID and/or MODE from the mask so that
2965 * secpolicy_vnode_setattr() doesn't revoke it.
2966 */
2967
2968 if (trim_mask) {
2969 saved_mask = vap->va_mask;
2970 vap->va_mask &= ~trim_mask;
2971 }
2972 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2973 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2974 if (err)
2975 goto out3;
2976
2977 if (trim_mask)
2978 vap->va_mask |= saved_mask;
2979 }
2980
2981 /*
2982 * secpolicy_vnode_setattr() or the take-ownership path may have
2983 * changed va_mask.
2984 */
2985 mask = vap->va_mask;
2986
2987 if ((mask & (ATTR_UID | ATTR_GID))) {
2988 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2989 &xattr_obj, sizeof (xattr_obj));
2990
2991 if (err == 0 && xattr_obj) {
2992 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
2993 if (err)
2994 goto out2;
2995 }
2996 if (mask & ATTR_UID) {
2997 new_kuid = zfs_fuid_create(zfsvfs,
2998 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2999 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
3000 zfs_fuid_overquota(zfsvfs, B_FALSE, new_kuid)) {
3001 if (attrzp)
3002 iput(ZTOI(attrzp));
3003 err = EDQUOT;
3004 goto out2;
3005 }
3006 }
3007
3008 if (mask & ATTR_GID) {
3009 new_kgid = zfs_fuid_create(zfsvfs,
3010 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
3011 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
3012 zfs_fuid_overquota(zfsvfs, B_TRUE, new_kgid)) {
3013 if (attrzp)
3014 iput(ZTOI(attrzp));
3015 err = EDQUOT;
3016 goto out2;
3017 }
3018 }
3019 }
3020 tx = dmu_tx_create(zfsvfs->z_os);
3021
3022 if (mask & ATTR_MODE) {
3023 uint64_t pmode = zp->z_mode;
3024 uint64_t acl_obj;
3025 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3026
3027 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
3028
3029 mutex_enter(&zp->z_lock);
3030 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3031 /*
3032 * Are we upgrading ACL from old V0 format
3033 * to V1 format?
3034 */
3035 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3036 zfs_znode_acl_version(zp) ==
3037 ZFS_ACL_VERSION_INITIAL) {
3038 dmu_tx_hold_free(tx, acl_obj, 0,
3039 DMU_OBJECT_END);
3040 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3041 0, aclp->z_acl_bytes);
3042 } else {
3043 dmu_tx_hold_write(tx, acl_obj, 0,
3044 aclp->z_acl_bytes);
3045 }
3046 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3047 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3048 0, aclp->z_acl_bytes);
3049 }
3050 mutex_exit(&zp->z_lock);
3051 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3052 } else {
3053 if ((mask & ATTR_XVATTR) &&
3054 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3055 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3056 else
3057 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3058 }
3059
3060 if (attrzp) {
3061 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3062 }
3063
3064 fuid_dirtied = zfsvfs->z_fuid_dirty;
3065 if (fuid_dirtied)
3066 zfs_fuid_txhold(zfsvfs, tx);
3067
3068 zfs_sa_upgrade_txholds(tx, zp);
3069
3070 err = dmu_tx_assign(tx, TXG_WAIT);
3071 if (err)
3072 goto out;
3073
3074 count = 0;
3075 /*
3076 * Set each attribute requested.
3077 * We group settings according to the locks they need to acquire.
3078 *
3079 * Note: you cannot set ctime directly, although it will be
3080 * updated as a side-effect of calling this function.
3081 */
3082
3083
3084 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3085 mutex_enter(&zp->z_acl_lock);
3086 mutex_enter(&zp->z_lock);
3087
3088 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3089 &zp->z_pflags, sizeof (zp->z_pflags));
3090
3091 if (attrzp) {
3092 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3093 mutex_enter(&attrzp->z_acl_lock);
3094 mutex_enter(&attrzp->z_lock);
3095 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3096 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3097 sizeof (attrzp->z_pflags));
3098 }
3099
3100 if (mask & (ATTR_UID|ATTR_GID)) {
3101
3102 if (mask & ATTR_UID) {
3103 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3104 new_uid = zfs_uid_read(ZTOI(zp));
3105 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3106 &new_uid, sizeof (new_uid));
3107 if (attrzp) {
3108 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3109 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3110 sizeof (new_uid));
3111 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
3112 }
3113 }
3114
3115 if (mask & ATTR_GID) {
3116 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3117 new_gid = zfs_gid_read(ZTOI(zp));
3118 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3119 NULL, &new_gid, sizeof (new_gid));
3120 if (attrzp) {
3121 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3122 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3123 sizeof (new_gid));
3124 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
3125 }
3126 }
3127 if (!(mask & ATTR_MODE)) {
3128 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3129 NULL, &new_mode, sizeof (new_mode));
3130 new_mode = zp->z_mode;
3131 }
3132 err = zfs_acl_chown_setattr(zp);
3133 ASSERT(err == 0);
3134 if (attrzp) {
3135 err = zfs_acl_chown_setattr(attrzp);
3136 ASSERT(err == 0);
3137 }
3138 }
3139
3140 if (mask & ATTR_MODE) {
3141 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3142 &new_mode, sizeof (new_mode));
3143 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
3144 ASSERT3P(aclp, !=, NULL);
3145 err = zfs_aclset_common(zp, aclp, cr, tx);
3146 ASSERT0(err);
3147 if (zp->z_acl_cached)
3148 zfs_acl_free(zp->z_acl_cached);
3149 zp->z_acl_cached = aclp;
3150 aclp = NULL;
3151 }
3152
3153 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3154 zp->z_atime_dirty = 0;
3155 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3156 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3157 &atime, sizeof (atime));
3158 }
3159
3160 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
3161 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3162 ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
3163 ZTOI(zp)->i_sb->s_time_gran);
3164
3165 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3166 mtime, sizeof (mtime));
3167 }
3168
3169 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
3170 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
3171 ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
3172 ZTOI(zp)->i_sb->s_time_gran);
3173 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3174 ctime, sizeof (ctime));
3175 }
3176
3177 if (attrzp && mask) {
3178 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3179 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
3180 sizeof (ctime));
3181 }
3182
3183 /*
3184 * Do this after setting timestamps to prevent timestamp
3185 * update from toggling the bit.
3186 */
3187
3188 if (xoap && (mask & ATTR_XVATTR)) {
3189
3190 /*
3191 * Restore the trimmed-off masks
3192 * so that return masks can be set for the caller.
3193 */
3194
3195 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
3196 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3197 }
3198 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
3199 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3200 }
3201 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
3202 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3203 }
3204 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
3205 XVA_SET_REQ(xvap, XAT_NODUMP);
3206 }
3207 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
3208 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3209 }
3210 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
3211 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3212 }
3213
3214 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3215 ASSERT(S_ISREG(ip->i_mode));
3216
3217 zfs_xvattr_set(zp, xvap, tx);
3218 }
3219
3220 if (fuid_dirtied)
3221 zfs_fuid_sync(zfsvfs, tx);
3222
3223 if (mask != 0)
3224 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3225
3226 mutex_exit(&zp->z_lock);
3227 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3228 mutex_exit(&zp->z_acl_lock);
3229
3230 if (attrzp) {
3231 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3232 mutex_exit(&attrzp->z_acl_lock);
3233 mutex_exit(&attrzp->z_lock);
3234 }
3235 out:
3236 if (err == 0 && attrzp) {
3237 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3238 xattr_count, tx);
3239 ASSERT(err2 == 0);
3240 }
3241
3242 if (aclp)
3243 zfs_acl_free(aclp);
3244
3245 if (fuidp) {
3246 zfs_fuid_info_free(fuidp);
3247 fuidp = NULL;
3248 }
3249
3250 if (err) {
3251 dmu_tx_abort(tx);
3252 if (attrzp)
3253 iput(ZTOI(attrzp));
3254 if (err == ERESTART)
3255 goto top;
3256 } else {
3257 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3258 dmu_tx_commit(tx);
3259 if (attrzp)
3260 iput(ZTOI(attrzp));
3261 zfs_inode_update(zp);
3262 }
3263
3264 out2:
3265 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3266 zil_commit(zilog, 0);
3267
3268 out3:
3269 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
3270 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
3271 kmem_free(tmpxvattr, sizeof (xvattr_t));
3272 ZFS_EXIT(zfsvfs);
3273 return (err);
3274 }
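/*
 * Editor's note: the trim_mask dance in zfs_setattr() above is subtle, so
 * a condensed sketch (illustrative only).  Attribute bits that were
 * already authorized by the ACL checks are hidden from
 * secpolicy_vnode_setattr() so it cannot veto them, then restored so the
 * rest of the function sees the caller's full mask:
 *
 *	if (trim_mask) {
 *		saved_mask = vap->va_mask;
 *		vap->va_mask &= ~trim_mask;
 *	}
 *	err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
 *	    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
 *	if (trim_mask)
 *		vap->va_mask |= saved_mask;
 */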
3275
3276 typedef struct zfs_zlock {
3277 krwlock_t *zl_rwlock; /* lock we acquired */
3278 znode_t *zl_znode; /* znode we held */
3279 struct zfs_zlock *zl_next; /* next in list */
3280 } zfs_zlock_t;
3281
3282 /*
3283 * Drop locks and release vnodes that were held by zfs_rename_lock().
3284 */
3285 static void
3286 zfs_rename_unlock(zfs_zlock_t **zlpp)
3287 {
3288 zfs_zlock_t *zl;
3289
3290 while ((zl = *zlpp) != NULL) {
3291 if (zl->zl_znode != NULL)
3292 zfs_iput_async(ZTOI(zl->zl_znode));
3293 rw_exit(zl->zl_rwlock);
3294 *zlpp = zl->zl_next;
3295 kmem_free(zl, sizeof (*zl));
3296 }
3297 }
3298
3299 /*
3300 * Search back through the directory tree, using the ".." entries.
3301 * Lock each directory in the chain to prevent concurrent renames.
3302 * Fail any attempt to move a directory into one of its own descendants.
3303 * XXX - z_parent_lock can overlap with map or grow locks
3304 */
3305 static int
3306 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3307 {
3308 zfs_zlock_t *zl;
3309 znode_t *zp = tdzp;
3310 uint64_t rootid = ZTOZSB(zp)->z_root;
3311 uint64_t oidp = zp->z_id;
3312 krwlock_t *rwlp = &szp->z_parent_lock;
3313 krw_t rw = RW_WRITER;
3314
3315 /*
3316 * First pass write-locks szp and compares to zp->z_id.
3317 * Later passes read-lock zp and compare to zp->z_parent.
3318 */
3319 do {
3320 if (!rw_tryenter(rwlp, rw)) {
3321 /*
3322 * Another thread is renaming in this path.
3323 * Note that if we are a WRITER, we don't have any
3324 * parent_locks held yet.
3325 */
3326 if (rw == RW_READER && zp->z_id > szp->z_id) {
3327 /*
3328 * Drop our locks and restart
3329 */
3330 zfs_rename_unlock(&zl);
3331 *zlpp = NULL;
3332 zp = tdzp;
3333 oidp = zp->z_id;
3334 rwlp = &szp->z_parent_lock;
3335 rw = RW_WRITER;
3336 continue;
3337 } else {
3338 /*
3339 * Wait for other thread to drop its locks
3340 */
3341 rw_enter(rwlp, rw);
3342 }
3343 }
3344
3345 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3346 zl->zl_rwlock = rwlp;
3347 zl->zl_znode = NULL;
3348 zl->zl_next = *zlpp;
3349 *zlpp = zl;
3350
3351 if (oidp == szp->z_id) /* We're a descendant of szp */
3352 return (SET_ERROR(EINVAL));
3353
3354 if (oidp == rootid) /* We've hit the top */
3355 return (0);
3356
3357 if (rw == RW_READER) { /* i.e. not the first pass */
3358 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3359 if (error)
3360 return (error);
3361 zl->zl_znode = zp;
3362 }
3363 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3364 &oidp, sizeof (oidp));
3365 rwlp = &zp->z_parent_lock;
3366 rw = RW_READER;
3367
3368 } while (zp->z_id != sdzp->z_id);
3369
3370 return (0);
3371 }
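/*
 * Editor's note: stripped of the lock juggling, the walk performed by
 * zfs_rename_lock() above reduces to the following ancestor check (a
 * sketch, illustrative only), which is what actually rejects moving a
 * directory into one of its own descendants:
 *
 *	zp = tdzp;
 *	oidp = zp->z_id;
 *	do {
 *		if (oidp == szp->z_id)
 *			return (SET_ERROR(EINVAL));	target is inside szp
 *		if (oidp == rootid)
 *			return (0);			reached the top
 *		(load the znode for oidp, then read SA_ZPL_PARENT
 *		 into oidp to step one level up)
 *	} while (zp->z_id != sdzp->z_id);
 *	return (0);
 */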
3372
3373 /*
3374 * Move an entry from the provided source directory to the target
3375 * directory. Change the entry name as indicated.
3376 *
3377 * IN: sdip - Source directory containing the "old entry".
3378 * snm - Old entry name.
3379 * tdip - Target directory to contain the "new entry".
3380 * tnm - New entry name.
3381 * cr - credentials of caller.
3382 * flags - case flags
3383 *
3384 * RETURN: 0 on success, error code on failure.
3385 *
3386 * Timestamps:
3387 * sdip,tdip - ctime|mtime updated
3388 */
3389 /*ARGSUSED*/
3390 int
3391 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3392 cred_t *cr, int flags)
3393 {
3394 znode_t *tdzp, *szp, *tzp;
3395 znode_t *sdzp = ITOZ(sdip);
3396 zfsvfs_t *zfsvfs = ITOZSB(sdip);
3397 zilog_t *zilog;
3398 zfs_dirlock_t *sdl, *tdl;
3399 dmu_tx_t *tx;
3400 zfs_zlock_t *zl;
3401 int cmp, serr, terr;
3402 int error = 0;
3403 int zflg = 0;
3404 boolean_t waited = B_FALSE;
3405
3406 if (snm == NULL || tnm == NULL)
3407 return (SET_ERROR(EINVAL));
3408
3409 ZFS_ENTER(zfsvfs);
3410 ZFS_VERIFY_ZP(sdzp);
3411 zilog = zfsvfs->z_log;
3412
3413 tdzp = ITOZ(tdip);
3414 ZFS_VERIFY_ZP(tdzp);
3415
3416 /*
3417 * We check i_sb because snapshots and the ctldir must have different
3418 * super blocks.
3419 */
3420 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3421 ZFS_EXIT(zfsvfs);
3422 return (SET_ERROR(EXDEV));
3423 }
3424
3425 if (zfsvfs->z_utf8 && u8_validate(tnm,
3426 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3427 ZFS_EXIT(zfsvfs);
3428 return (SET_ERROR(EILSEQ));
3429 }
3430
3431 if (flags & FIGNORECASE)
3432 zflg |= ZCILOOK;
3433
3434 top:
3435 szp = NULL;
3436 tzp = NULL;
3437 zl = NULL;
3438
3439 /*
3440 * This is to prevent the creation of links into attribute space
3441 * by renaming a linked file into/out of an attribute directory.
3442 * See the comment in zfs_link() for why this is considered bad.
3443 */
3444 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3445 ZFS_EXIT(zfsvfs);
3446 return (SET_ERROR(EINVAL));
3447 }
3448
3449 /*
3450 * Lock source and target directory entries. To prevent deadlock,
3451 * a lock ordering must be defined. We lock the directory with
3452 * the smallest object id first, or if it's a tie, the one with
3453 * the lexically first name.
3454 */
3455 if (sdzp->z_id < tdzp->z_id) {
3456 cmp = -1;
3457 } else if (sdzp->z_id > tdzp->z_id) {
3458 cmp = 1;
3459 } else {
3460 /*
3461 * First compare the two name arguments without
3462 * considering any case folding.
3463 */
3464 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3465
3466 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3467 ASSERT(error == 0 || !zfsvfs->z_utf8);
3468 if (cmp == 0) {
3469 /*
3470 * POSIX: "If the old argument and the new argument
3471 * both refer to links to the same existing file,
3472 * the rename() function shall return successfully
3473 * and perform no other action."
3474 */
3475 ZFS_EXIT(zfsvfs);
3476 return (0);
3477 }
3478 /*
3479 * If the file system is case-folding, then we may
3480 * have some more checking to do. A case-folding file
3481 * system is either supporting mixed case sensitivity
3482 * access or is completely case-insensitive. Note
3483 * that the file system is always case preserving.
3484 *
3485 * In mixed sensitivity mode case sensitive behavior
3486 * is the default. FIGNORECASE must be used to
3487 * explicitly request case insensitive behavior.
3488 *
3489 * If the source and target names provided differ only
3490 * by case (e.g., a request to rename 'tim' to 'Tim'),
3491 * we will treat this as a special case in the
3492 * case-insensitive mode: as long as the source name
3493 * is an exact match, we will allow this to proceed as
3494 * a name-change request.
3495 */
3496 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3497 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3498 flags & FIGNORECASE)) &&
3499 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3500 &error) == 0) {
3501 /*
3502 * case preserving rename request, require exact
3503 * name matches
3504 */
3505 zflg |= ZCIEXACT;
3506 zflg &= ~ZCILOOK;
3507 }
3508 }
3509
3510 /*
3511 * If the source and destination directories are the same, we should
3512 * grab the z_name_lock of that directory only once.
3513 */
3514 if (sdzp == tdzp) {
3515 zflg |= ZHAVELOCK;
3516 rw_enter(&sdzp->z_name_lock, RW_READER);
3517 }
3518
3519 if (cmp < 0) {
3520 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3521 ZEXISTS | zflg, NULL, NULL);
3522 terr = zfs_dirent_lock(&tdl,
3523 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3524 } else {
3525 terr = zfs_dirent_lock(&tdl,
3526 tdzp, tnm, &tzp, zflg, NULL, NULL);
3527 serr = zfs_dirent_lock(&sdl,
3528 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3529 NULL, NULL);
3530 }
3531
3532 if (serr) {
3533 /*
3534 * Source entry invalid or not there.
3535 */
3536 if (!terr) {
3537 zfs_dirent_unlock(tdl);
3538 if (tzp)
3539 iput(ZTOI(tzp));
3540 }
3541
3542 if (sdzp == tdzp)
3543 rw_exit(&sdzp->z_name_lock);
3544
3545 if (strcmp(snm, "..") == 0)
3546 serr = EINVAL;
3547 ZFS_EXIT(zfsvfs);
3548 return (serr);
3549 }
3550 if (terr) {
3551 zfs_dirent_unlock(sdl);
3552 iput(ZTOI(szp));
3553
3554 if (sdzp == tdzp)
3555 rw_exit(&sdzp->z_name_lock);
3556
3557 if (strcmp(tnm, "..") == 0)
3558 terr = EINVAL;
3559 ZFS_EXIT(zfsvfs);
3560 return (terr);
3561 }
3562
3563 /*
3564 * Must have write access at the source to remove the old entry
3565 * and write access at the target to create the new entry.
3566 * Note that if target and source are the same, this can be
3567 * done in a single check.
3568 */
3569
3570 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3571 goto out;
3572
3573 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3574 /*
3575 * Check to make sure rename is valid.
3576 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3577 */
3578 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3579 goto out;
3580 }
3581
3582 /*
3583 * Does target exist?
3584 */
3585 if (tzp) {
3586 /*
3587 * Source and target must be the same type.
3588 */
3589 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3590 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3591 error = SET_ERROR(ENOTDIR);
3592 goto out;
3593 }
3594 } else {
3595 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3596 error = SET_ERROR(EISDIR);
3597 goto out;
3598 }
3599 }
3600 /*
3601 * POSIX dictates that when the source and target
3602 * entries refer to the same file object, rename
3603 * must do nothing and exit without error.
3604 */
3605 if (szp->z_id == tzp->z_id) {
3606 error = 0;
3607 goto out;
3608 }
3609 }
3610
3611 tx = dmu_tx_create(zfsvfs->z_os);
3612 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3613 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3614 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3615 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3616 if (sdzp != tdzp) {
3617 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3618 zfs_sa_upgrade_txholds(tx, tdzp);
3619 }
3620 if (tzp) {
3621 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3622 zfs_sa_upgrade_txholds(tx, tzp);
3623 }
3624
3625 zfs_sa_upgrade_txholds(tx, szp);
3626 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3627 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3628 if (error) {
3629 if (zl != NULL)
3630 zfs_rename_unlock(&zl);
3631 zfs_dirent_unlock(sdl);
3632 zfs_dirent_unlock(tdl);
3633
3634 if (sdzp == tdzp)
3635 rw_exit(&sdzp->z_name_lock);
3636
3637 if (error == ERESTART) {
3638 waited = B_TRUE;
3639 dmu_tx_wait(tx);
3640 dmu_tx_abort(tx);
3641 iput(ZTOI(szp));
3642 if (tzp)
3643 iput(ZTOI(tzp));
3644 goto top;
3645 }
3646 dmu_tx_abort(tx);
3647 iput(ZTOI(szp));
3648 if (tzp)
3649 iput(ZTOI(tzp));
3650 ZFS_EXIT(zfsvfs);
3651 return (error);
3652 }
3653
3654 if (tzp) /* Attempt to remove the existing target */
3655 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3656
3657 if (error == 0) {
3658 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3659 if (error == 0) {
3660 szp->z_pflags |= ZFS_AV_MODIFIED;
3661
3662 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3663 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3664 ASSERT0(error);
3665
3666 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3667 if (error == 0) {
3668 zfs_log_rename(zilog, tx, TX_RENAME |
3669 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3670 sdl->dl_name, tdzp, tdl->dl_name, szp);
3671 } else {
3672 /*
3673 * At this point, we have successfully created
3674 * the target name, but have failed to remove
3675 * the source name. Since the create was done
3676 * with the ZRENAMING flag, there are
3677 * complications; for one, the link count is
3678 * wrong. The easiest way to deal with this
3679 * is to remove the newly created target, and
3680 * return the original error. This must
3681 * succeed; fortunately, it is very unlikely to
3682 * fail, since we just created it.
3683 */
3684 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3685 ZRENAMING, NULL), ==, 0);
3686 }
3687 }
3688 }
3689
3690 dmu_tx_commit(tx);
3691 out:
3692 if (zl != NULL)
3693 zfs_rename_unlock(&zl);
3694
3695 zfs_dirent_unlock(sdl);
3696 zfs_dirent_unlock(tdl);
3697
3698 zfs_inode_update(sdzp);
3699 if (sdzp == tdzp)
3700 rw_exit(&sdzp->z_name_lock);
3701
3702 if (sdzp != tdzp)
3703 zfs_inode_update(tdzp);
3704
3705 zfs_inode_update(szp);
3706 iput(ZTOI(szp));
3707 if (tzp) {
3708 zfs_inode_update(tzp);
3709 iput(ZTOI(tzp));
3710 }
3711
3712 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3713 zil_commit(zilog, 0);
3714
3715 ZFS_EXIT(zfsvfs);
3716 return (error);
3717 }
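/*
 * Editor's note: a condensed sketch of the deadlock-avoidance ordering
 * used when zfs_rename() above locks the two directory entries
 * (illustrative only).  Concurrent renames over the same directories
 * always acquire the locks in the same order because the order is derived
 * from the object ids, with the entry names as a tiebreaker, rather than
 * from the argument order:
 *
 *	if (sdzp->z_id != tdzp->z_id)
 *		cmp = (sdzp->z_id < tdzp->z_id) ? -1 : 1;
 *	else
 *		cmp = u8_strcmp(snm, tnm, 0, nofold,
 *		    U8_UNICODE_LATEST, &error);
 *	if (cmp < 0)
 *		(lock the source entry first, then the target)
 *	else
 *		(lock the target entry first, then the source)
 */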
3718
3719 /*
3720 * Insert the indicated symbolic reference entry into the directory.
3721 *
3722 * IN: dip - Directory to contain new symbolic link.
3723 * link - Name for new symlink entry.
3724 * vap - Attributes of new entry.
3725 * target - Target path of new symlink.
3726 *
3727 * cr - credentials of caller.
3728 * flags - case flags
3729 *
3730 * RETURN: 0 on success, error code on failure.
3731 *
3732 * Timestamps:
3733 * dip - ctime|mtime updated
3734 */
3735 /*ARGSUSED*/
3736 int
3737 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3738 struct inode **ipp, cred_t *cr, int flags)
3739 {
3740 znode_t *zp, *dzp = ITOZ(dip);
3741 zfs_dirlock_t *dl;
3742 dmu_tx_t *tx;
3743 zfsvfs_t *zfsvfs = ITOZSB(dip);
3744 zilog_t *zilog;
3745 uint64_t len = strlen(link);
3746 int error;
3747 int zflg = ZNEW;
3748 zfs_acl_ids_t acl_ids;
3749 boolean_t fuid_dirtied;
3750 uint64_t txtype = TX_SYMLINK;
3751 boolean_t waited = B_FALSE;
3752
3753 ASSERT(S_ISLNK(vap->va_mode));
3754
3755 if (name == NULL)
3756 return (SET_ERROR(EINVAL));
3757
3758 ZFS_ENTER(zfsvfs);
3759 ZFS_VERIFY_ZP(dzp);
3760 zilog = zfsvfs->z_log;
3761
3762 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3763 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3764 ZFS_EXIT(zfsvfs);
3765 return (SET_ERROR(EILSEQ));
3766 }
3767 if (flags & FIGNORECASE)
3768 zflg |= ZCILOOK;
3769
3770 if (len > MAXPATHLEN) {
3771 ZFS_EXIT(zfsvfs);
3772 return (SET_ERROR(ENAMETOOLONG));
3773 }
3774
3775 if ((error = zfs_acl_ids_create(dzp, 0,
3776 vap, cr, NULL, &acl_ids)) != 0) {
3777 ZFS_EXIT(zfsvfs);
3778 return (error);
3779 }
3780 top:
3781 *ipp = NULL;
3782
3783 /*
3784 * Attempt to lock directory; fail if entry already exists.
3785 */
3786 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3787 if (error) {
3788 zfs_acl_ids_free(&acl_ids);
3789 ZFS_EXIT(zfsvfs);
3790 return (error);
3791 }
3792
3793 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3794 zfs_acl_ids_free(&acl_ids);
3795 zfs_dirent_unlock(dl);
3796 ZFS_EXIT(zfsvfs);
3797 return (error);
3798 }
3799
3800 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3801 zfs_acl_ids_free(&acl_ids);
3802 zfs_dirent_unlock(dl);
3803 ZFS_EXIT(zfsvfs);
3804 return (SET_ERROR(EDQUOT));
3805 }
3806 tx = dmu_tx_create(zfsvfs->z_os);
3807 fuid_dirtied = zfsvfs->z_fuid_dirty;
3808 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3809 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3810 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3811 ZFS_SA_BASE_ATTR_SIZE + len);
3812 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3813 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3814 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3815 acl_ids.z_aclp->z_acl_bytes);
3816 }
3817 if (fuid_dirtied)
3818 zfs_fuid_txhold(zfsvfs, tx);
3819 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3820 if (error) {
3821 zfs_dirent_unlock(dl);
3822 if (error == ERESTART) {
3823 waited = B_TRUE;
3824 dmu_tx_wait(tx);
3825 dmu_tx_abort(tx);
3826 goto top;
3827 }
3828 zfs_acl_ids_free(&acl_ids);
3829 dmu_tx_abort(tx);
3830 ZFS_EXIT(zfsvfs);
3831 return (error);
3832 }
3833
3834 /*
3835 * Create a new object for the symlink.
3836 * for version 4 ZPL datasets the symlink will be an SA attribute
3837 */
3838 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3839
3840 if (fuid_dirtied)
3841 zfs_fuid_sync(zfsvfs, tx);
3842
3843 mutex_enter(&zp->z_lock);
3844 if (zp->z_is_sa)
3845 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3846 link, len, tx);
3847 else
3848 zfs_sa_symlink(zp, link, len, tx);
3849 mutex_exit(&zp->z_lock);
3850
3851 zp->z_size = len;
3852 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3853 &zp->z_size, sizeof (zp->z_size), tx);
3854 /*
3855 * Insert the new object into the directory.
3856 */
3857 (void) zfs_link_create(dl, zp, tx, ZNEW);
3858
3859 if (flags & FIGNORECASE)
3860 txtype |= TX_CI;
3861 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3862
3863 zfs_inode_update(dzp);
3864 zfs_inode_update(zp);
3865
3866 zfs_acl_ids_free(&acl_ids);
3867
3868 dmu_tx_commit(tx);
3869
3870 zfs_dirent_unlock(dl);
3871
3872 *ipp = ZTOI(zp);
3873
3874 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3875 zil_commit(zilog, 0);
3876
3877 ZFS_EXIT(zfsvfs);
3878 return (error);
3879 }
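/*
 * Editor's note: a condensed view of the two symlink storage paths used by
 * zfs_symlink() above and mirrored by zfs_readlink() below (illustrative
 * only).  On SA-capable datasets the target path lives in a system
 * attribute; otherwise it is written through the legacy zfs_sa_symlink()
 * path:
 *
 *	if (zp->z_is_sa)
 *		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
 *		    link, len, tx);
 *	else
 *		zfs_sa_symlink(zp, link, len, tx);
 */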
3880
3881 /*
3882 * Return, in the buffer contained in the provided uio structure,
3883 * the symbolic path referred to by ip.
3884 *
3885 * IN: ip - inode of symbolic link
3886 * uio - structure to contain the link path.
3887 * cr - credentials of caller.
3888 *
3889 * RETURN: 0 if success
3890 * error code if failure
3891 *
3892 * Timestamps:
3893 * ip - atime updated
3894 */
3895 /* ARGSUSED */
3896 int
3897 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3898 {
3899 znode_t *zp = ITOZ(ip);
3900 zfsvfs_t *zfsvfs = ITOZSB(ip);
3901 int error;
3902
3903 ZFS_ENTER(zfsvfs);
3904 ZFS_VERIFY_ZP(zp);
3905
3906 mutex_enter(&zp->z_lock);
3907 if (zp->z_is_sa)
3908 error = sa_lookup_uio(zp->z_sa_hdl,
3909 SA_ZPL_SYMLINK(zfsvfs), uio);
3910 else
3911 error = zfs_sa_readlink(zp, uio);
3912 mutex_exit(&zp->z_lock);
3913
3914 ZFS_EXIT(zfsvfs);
3915 return (error);
3916 }
3917
3918 /*
3919 * Insert a new entry into directory tdip referencing sip.
3920 *
3921 * IN: tdip - Directory to contain new entry.
3922 * sip - inode of new entry.
3923 * name - name of new entry.
3924 * cr - credentials of caller.
3925 *
3926 * RETURN: 0 if success
3927 * error code if failure
3928 *
3929 * Timestamps:
3930 * tdip - ctime|mtime updated
3931 * sip - ctime updated
3932 */
3933 /* ARGSUSED */
3934 int
3935 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
3936 int flags)
3937 {
3938 znode_t *dzp = ITOZ(tdip);
3939 znode_t *tzp, *szp;
3940 zfsvfs_t *zfsvfs = ITOZSB(tdip);
3941 zilog_t *zilog;
3942 zfs_dirlock_t *dl;
3943 dmu_tx_t *tx;
3944 int error;
3945 int zf = ZNEW;
3946 uint64_t parent;
3947 uid_t owner;
3948 boolean_t waited = B_FALSE;
3949 boolean_t is_tmpfile = 0;
3950 uint64_t txg;
3951 #ifdef HAVE_TMPFILE
3952 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
3953 #endif
3954 ASSERT(S_ISDIR(tdip->i_mode));
3955
3956 if (name == NULL)
3957 return (SET_ERROR(EINVAL));
3958
3959 ZFS_ENTER(zfsvfs);
3960 ZFS_VERIFY_ZP(dzp);
3961 zilog = zfsvfs->z_log;
3962
3963 /*
3964 * POSIX dictates that we return EPERM here.
3965 * Better choices include ENOTSUP or EISDIR.
3966 */
3967 if (S_ISDIR(sip->i_mode)) {
3968 ZFS_EXIT(zfsvfs);
3969 return (SET_ERROR(EPERM));
3970 }
3971
3972 szp = ITOZ(sip);
3973 ZFS_VERIFY_ZP(szp);
3974
3975 /*
3976 * We check i_sb because snapshots and the ctldir must have different
3977 * super blocks.
3978 */
3979 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
3980 ZFS_EXIT(zfsvfs);
3981 return (SET_ERROR(EXDEV));
3982 }
3983
3984 /* Prevent links to .zfs/shares files */
3985
3986 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3987 &parent, sizeof (uint64_t))) != 0) {
3988 ZFS_EXIT(zfsvfs);
3989 return (error);
3990 }
3991 if (parent == zfsvfs->z_shares_dir) {
3992 ZFS_EXIT(zfsvfs);
3993 return (SET_ERROR(EPERM));
3994 }
3995
3996 if (zfsvfs->z_utf8 && u8_validate(name,
3997 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3998 ZFS_EXIT(zfsvfs);
3999 return (SET_ERROR(EILSEQ));
4000 }
4001 if (flags & FIGNORECASE)
4002 zf |= ZCILOOK;
4003
4004 /*
4005 * We do not support links between attributes and non-attributes
4006 * because of the potential security risk of creating links
4007 * into "normal" file space in order to circumvent restrictions
4008 * imposed in attribute space.
4009 */
4010 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4011 ZFS_EXIT(zfsvfs);
4012 return (SET_ERROR(EINVAL));
4013 }
4014
4015 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4016 cr, ZFS_OWNER);
4017 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4018 ZFS_EXIT(zfsvfs);
4019 return (SET_ERROR(EPERM));
4020 }
4021
4022 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4023 ZFS_EXIT(zfsvfs);
4024 return (error);
4025 }
4026
4027 top:
4028 /*
4029 * Attempt to lock directory; fail if entry already exists.
4030 */
4031 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4032 if (error) {
4033 ZFS_EXIT(zfsvfs);
4034 return (error);
4035 }
4036
4037 tx = dmu_tx_create(zfsvfs->z_os);
4038 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4039 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4040 if (is_tmpfile)
4041 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4042
4043 zfs_sa_upgrade_txholds(tx, szp);
4044 zfs_sa_upgrade_txholds(tx, dzp);
4045 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4046 if (error) {
4047 zfs_dirent_unlock(dl);
4048 if (error == ERESTART) {
4049 waited = B_TRUE;
4050 dmu_tx_wait(tx);
4051 dmu_tx_abort(tx);
4052 goto top;
4053 }
4054 dmu_tx_abort(tx);
4055 ZFS_EXIT(zfsvfs);
4056 return (error);
4057 }
4058 /* unmark z_unlinked so zfs_link_create will not reject */
4059 if (is_tmpfile)
4060 szp->z_unlinked = 0;
4061 error = zfs_link_create(dl, szp, tx, 0);
4062
4063 if (error == 0) {
4064 uint64_t txtype = TX_LINK;
4065 /*
4066 * tmpfile is created to be in z_unlinkedobj, so remove it.
4067 * Also, we don't log in the ZIL, because all previous file
4068 * operations on the tmpfile are ignored by the ZIL. Instead we
4069 * always wait for the txg to sync to make sure all previous
4070 * operations are sync safe.
4071 */
4072 if (is_tmpfile) {
4073 VERIFY(zap_remove_int(zfsvfs->z_os,
4074 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
4075 } else {
4076 if (flags & FIGNORECASE)
4077 txtype |= TX_CI;
4078 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4079 }
4080 } else if (is_tmpfile) {
4081 /* restore z_unlinked since linking failed */
4082 szp->z_unlinked = 1;
4083 }
4084 txg = dmu_tx_get_txg(tx);
4085 dmu_tx_commit(tx);
4086
4087 zfs_dirent_unlock(dl);
4088
4089 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4090 zil_commit(zilog, 0);
4091
4092 if (is_tmpfile)
4093 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
4094
4095 zfs_inode_update(dzp);
4096 zfs_inode_update(szp);
4097 ZFS_EXIT(zfsvfs);
4098 return (error);
4099 }
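/*
 * Editor's note: a condensed sketch of the durability split at the end of
 * zfs_link() above (illustrative only).  Regular links rely on the ZIL;
 * tmpfile links wait for the txg instead, because none of the file's
 * earlier operations were ever logged and replaying only the link record
 * would be meaningless:
 *
 *	txg = dmu_tx_get_txg(tx);
 *	dmu_tx_commit(tx);
 *	if (is_tmpfile)
 *		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
 *	else if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
 *		zil_commit(zilog, 0);
 */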
4100
4101 static void
4102 zfs_putpage_commit_cb(void *arg)
4103 {
4104 struct page *pp = arg;
4105
4106 ClearPageError(pp);
4107 end_page_writeback(pp);
4108 }
4109
4110 /*
4111 * Push a page out to disk; once the page is on stable storage the
4112 * registered commit callback will be run as notification of completion.
4113 *
4114 * IN: ip - page mapped for inode.
4115 * pp - page to push (page is locked)
4116 * wbc - writeback control data
4117 *
4118 * RETURN: 0 if success
4119 * error code if failure
4120 *
4121 * Timestamps:
4122 * ip - ctime|mtime updated
4123 */
4124 /* ARGSUSED */
4125 int
4126 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
4127 {
4128 znode_t *zp = ITOZ(ip);
4129 zfsvfs_t *zfsvfs = ITOZSB(ip);
4130 loff_t offset;
4131 loff_t pgoff;
4132 unsigned int pglen;
4133 rl_t *rl;
4134 dmu_tx_t *tx;
4135 caddr_t va;
4136 int err = 0;
4137 uint64_t mtime[2], ctime[2];
4138 sa_bulk_attr_t bulk[3];
4139 int cnt = 0;
4140 struct address_space *mapping;
4141
4142 ZFS_ENTER(zfsvfs);
4143 ZFS_VERIFY_ZP(zp);
4144
4145 ASSERT(PageLocked(pp));
4146
4147 pgoff = page_offset(pp); /* Page byte-offset in file */
4148 offset = i_size_read(ip); /* File length in bytes */
4149 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4150 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
4151
4152 /* Page is beyond end of file */
4153 if (pgoff >= offset) {
4154 unlock_page(pp);
4155 ZFS_EXIT(zfsvfs);
4156 return (0);
4157 }
4158
4159 /* Truncate page length to end of file */
4160 if (pgoff + pglen > offset)
4161 pglen = offset - pgoff;
4162
4163 #if 0
4164 /*
4165 * FIXME: Allow mmap writes past its quota. The correct fix
4166 * is to register a page_mkwrite() handler to count the page
4167 * against its quota when it is about to be dirtied.
4168 */
4169 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4170 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4171 err = EDQUOT;
4172 }
4173 #endif
4174
4175 /*
4176 * The ordering here is critical and must adhere to the following
4177 * rules in order to avoid deadlocking in either zfs_read() or
4178 * zfs_free_range() due to a lock inversion.
4179 *
4180 * 1) The page must be unlocked prior to acquiring the range lock.
4181 * This is critical because zfs_read() calls find_lock_page()
4182 * which may block on the page lock while holding the range lock.
4183 *
4184 * 2) Before setting or clearing write back on a page the range lock
4185 * must be held in order to prevent a lock inversion with the
4186 * zfs_free_range() function.
4187 *
4188 * This presents a problem because upon entering this function the
4189 * page lock is already held. To safely acquire the range lock the
4190 * page lock must be dropped. This creates a window where another
4191 * process could truncate, invalidate, dirty, or write out the page.
4192 *
4193 * Therefore, after successfully reacquiring the range and page locks
4194 * the current page state is checked. In the common case everything
4195 * will be as expected and it can be written out. However, if
4196 * the page state has changed it must be handled accordingly.
4197 */
4198 mapping = pp->mapping;
4199 redirty_page_for_writepage(wbc, pp);
4200 unlock_page(pp);
4201
4202 rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
4203 lock_page(pp);
4204
4205 /* Page mapping changed or it was no longer dirty; we're done */
4206 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4207 unlock_page(pp);
4208 zfs_range_unlock(rl);
4209 ZFS_EXIT(zfsvfs);
4210 return (0);
4211 }
4212
4213 /* Another process started writeback; block if required */
4214 if (PageWriteback(pp)) {
4215 unlock_page(pp);
4216 zfs_range_unlock(rl);
4217
4218 if (wbc->sync_mode != WB_SYNC_NONE)
4219 wait_on_page_writeback(pp);
4220
4221 ZFS_EXIT(zfsvfs);
4222 return (0);
4223 }
4224
4225 /* Clear the dirty flag now that the required locks are held */
4226 if (!clear_page_dirty_for_io(pp)) {
4227 unlock_page(pp);
4228 zfs_range_unlock(rl);
4229 ZFS_EXIT(zfsvfs);
4230 return (0);
4231 }
4232
4233 /*
4234 * Counterpart for redirty_page_for_writepage() above. This page
4235 * was in fact not skipped and should not be counted as if it were.
4236 */
4237 wbc->pages_skipped--;
4238 set_page_writeback(pp);
4239 unlock_page(pp);
4240
4241 tx = dmu_tx_create(zfsvfs->z_os);
4242 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
4243 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4244 zfs_sa_upgrade_txholds(tx, zp);
4245
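	/*
	 * Unlike zfs_link() above, there is no retry loop here: on
	 * ERESTART we wait for the txg once, then redirty the page and
	 * return, leaving the retry to a later writeback pass.
	 */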
4246 err = dmu_tx_assign(tx, TXG_NOWAIT);
4247 if (err != 0) {
4248 if (err == ERESTART)
4249 dmu_tx_wait(tx);
4250
4251 dmu_tx_abort(tx);
4252 __set_page_dirty_nobuffers(pp);
4253 ClearPageError(pp);
4254 end_page_writeback(pp);
4255 zfs_range_unlock(rl);
4256 ZFS_EXIT(zfsvfs);
4257 return (err);
4258 }
4259
4260 va = kmap(pp);
4261 ASSERT3U(pglen, <=, PAGE_SIZE);
4262 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
4263 kunmap(pp);
4264
4265 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4266 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4267 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4268 &zp->z_pflags, 8);
4269
4270 /* Preserve the mtime and ctime provided by the inode */
4271 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4272 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4273 zp->z_atime_dirty = 0;
4274 zp->z_seq++;
4275
4276 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4277
4278 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
4279 zfs_putpage_commit_cb, pp);
4280 dmu_tx_commit(tx);
4281
4282 zfs_range_unlock(rl);
4283
4284 if (wbc->sync_mode != WB_SYNC_NONE) {
4285 /*
4286 * Note that this is rarely called under writepages(), because
4287 * writepages() normally handles the entire commit for
4288 * performance reasons.
4289 */
4290 zil_commit(zfsvfs->z_log, zp->z_id);
4291 }
4292
4293 ZFS_EXIT(zfsvfs);
4294 return (err);
4295 }
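#if 0
/*
 * Illustrative sketch only (never compiled): a minimal Linux
 * ->writepage handler built on zfs_putpage(). The real wrapper lives
 * in the ZPL layer; the name example_writepage is an assumption made
 * purely for illustration.
 */
static int
example_writepage(struct page *pp, struct writeback_control *wbc)
{
	/* The owning inode is reachable through the page's mapping. */
	return (zfs_putpage(pp->mapping->host, pp, wbc));
}
#endif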
4296
4297 /*
4298 * Update the system attributes when the inode has been dirtied. For the
4299 * moment we only update the mode, atime, mtime, and ctime.
4300 */
4301 int
4302 zfs_dirty_inode(struct inode *ip, int flags)
4303 {
4304 znode_t *zp = ITOZ(ip);
4305 zfsvfs_t *zfsvfs = ITOZSB(ip);
4306 dmu_tx_t *tx;
4307 uint64_t mode, atime[2], mtime[2], ctime[2];
4308 sa_bulk_attr_t bulk[4];
4309 int error = 0;
4310 int cnt = 0;
4311
4312 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
4313 return (0);
4314
4315 ZFS_ENTER(zfsvfs);
4316 ZFS_VERIFY_ZP(zp);
4317
4318 #ifdef I_DIRTY_TIME
4319 /*
4320 * This is the lazytime semantic introduced in Linux 4.0.
4321 * This flag is only passed in from update_time() when lazytime is set.
4322 * (Note, I_DIRTY_SYNC will also be set if not lazytime.)
4323 * Fortunately mtime and ctime are managed within ZFS itself, so we
4324 * only need to dirty atime.
4325 */
4326 if (flags == I_DIRTY_TIME) {
4327 zp->z_atime_dirty = 1;
4328 goto out;
4329 }
4330 #endif
4331
4332 tx = dmu_tx_create(zfsvfs->z_os);
4333
4334 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4335 zfs_sa_upgrade_txholds(tx, zp);
4336
4337 error = dmu_tx_assign(tx, TXG_WAIT);
4338 if (error) {
4339 dmu_tx_abort(tx);
4340 goto out;
4341 }
4342
4343 mutex_enter(&zp->z_lock);
4344 zp->z_atime_dirty = 0;
4345
4346 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4347 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4348 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4349 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4350
4351 /* Preserve the mode, atime, mtime and ctime provided by the inode */
4352 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4353 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4354 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4355 mode = ip->i_mode;
4356
4357 zp->z_mode = mode;
4358
4359 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4360 mutex_exit(&zp->z_lock);
4361
4362 dmu_tx_commit(tx);
4363 out:
4364 ZFS_EXIT(zfsvfs);
4365 return (error);
4366 }
4367
4368 /*ARGSUSED*/
4369 void
4370 zfs_inactive(struct inode *ip)
4371 {
4372 znode_t *zp = ITOZ(ip);
4373 zfsvfs_t *zfsvfs = ITOZSB(ip);
4374 uint64_t atime[2];
4375 int error;
4376 int need_unlock = 0;
4377
4378 /* Only read lock if we haven't already write locked, e.g. rollback */
4379 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
4380 need_unlock = 1;
4381 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4382 }
4383 if (zp->z_sa_hdl == NULL) {
4384 if (need_unlock)
4385 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4386 return;
4387 }
4388
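	/*
	 * If a lazytime update left the atime dirty (see zfs_dirty_inode()
	 * above), write it back to the SA now, before the znode is torn
	 * down.
	 */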
4389 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4390 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4391
4392 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4393 zfs_sa_upgrade_txholds(tx, zp);
4394 error = dmu_tx_assign(tx, TXG_WAIT);
4395 if (error) {
4396 dmu_tx_abort(tx);
4397 } else {
4398 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4399 mutex_enter(&zp->z_lock);
4400 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4401 (void *)&atime, sizeof (atime), tx);
4402 zp->z_atime_dirty = 0;
4403 mutex_exit(&zp->z_lock);
4404 dmu_tx_commit(tx);
4405 }
4406 }
4407
4408 zfs_zinactive(zp);
4409 if (need_unlock)
4410 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4411 }
4412
4413 /*
4414 * Bounds-check the seek operation.
4415 *
4416 * IN: ip - inode seeking within
4417 * ooff - old file offset
4418 * noffp - pointer to new file offset
4419 * ct - caller context
4420 *
4421 * RETURN: 0 if success
4422 * EINVAL if new offset invalid
4423 */
4424 /* ARGSUSED */
4425 int
4426 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4427 {
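	/* Directory offsets are opaque readdir cookies; accept any value. */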
4428 if (S_ISDIR(ip->i_mode))
4429 return (0);
4430 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4431 }
4432
4433 /*
4434 * Fill pages with data from the disk.
4435 */
4436 static int
4437 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4438 {
4439 znode_t *zp = ITOZ(ip);
4440 zfsvfs_t *zfsvfs = ITOZSB(ip);
4441 objset_t *os;
4442 struct page *cur_pp;
4443 u_offset_t io_off, total;
4444 size_t io_len;
4445 loff_t i_size;
4446 unsigned page_idx;
4447 int err;
4448
4449 os = zfsvfs->z_os;
4450 io_len = nr_pages << PAGE_SHIFT;
4451 i_size = i_size_read(ip);
4452 io_off = page_offset(pl[0]);
4453
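	/* Never read past the end of file; clamp the I/O range at i_size. */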
4454 if (io_off + io_len > i_size)
4455 io_len = i_size - io_off;
4456
4457 /*
4458 * Iterate over list of pages and read each page individually.
4459 */
4460 page_idx = 0;
4461 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4462 caddr_t va;
4463
4464 cur_pp = pl[page_idx++];
4465 va = kmap(cur_pp);
4466 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4467 DMU_READ_PREFETCH);
4468 kunmap(cur_pp);
4469 if (err) {
4470 /* convert checksum errors into IO errors */
4471 if (err == ECKSUM)
4472 err = SET_ERROR(EIO);
4473 return (err);
4474 }
4475 }
4476
4477 return (0);
4478 }
4479
4480 /*
4481 * Uses zfs_fillpage to read data from the file and fill the pages.
4482 *
4483 * IN: ip - inode of file to get data from.
4484 * pl - list of pages to read
4485 * nr_pages - number of pages to read
4486 *
4487 * RETURN: 0 on success, error code on failure.
4488 *
4489 * Timestamps:
4490 * ip - atime updated
4491 */
4492 /* ARGSUSED */
4493 int
4494 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4495 {
4496 znode_t *zp = ITOZ(ip);
4497 zfsvfs_t *zfsvfs = ITOZSB(ip);
4498 int err;
4499
4500 if (pl == NULL)
4501 return (0);
4502
4503 ZFS_ENTER(zfsvfs);
4504 ZFS_VERIFY_ZP(zp);
4505
4506 err = zfs_fillpage(ip, pl, nr_pages);
4507
4508 ZFS_EXIT(zfsvfs);
4509 return (err);
4510 }
4511
4512 /*
4513 * Check ZFS specific permissions to memory map a section of a file.
4514 *
4515 * IN: ip - inode of the file to mmap
4516 * off - file offset
4517 * addrp - start address in memory region
4518 * len - length of memory region
4519 * vm_flags - address flags
4520 *
4521 * RETURN: 0 if success
4522 * error code if failure
4523 */
4524 /*ARGSUSED*/
4525 int
4526 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4527 unsigned long vm_flags)
4528 {
4529 znode_t *zp = ITOZ(ip);
4530 zfsvfs_t *zfsvfs = ITOZSB(ip);
4531
4532 ZFS_ENTER(zfsvfs);
4533 ZFS_VERIFY_ZP(zp);
4534
4535 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4536 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4537 ZFS_EXIT(zfsvfs);
4538 return (SET_ERROR(EPERM));
4539 }
4540
4541 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4542 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4543 ZFS_EXIT(zfsvfs);
4544 return (SET_ERROR(EACCES));
4545 }
4546
4547 if (off < 0 || len > MAXOFFSET_T - off) {
4548 ZFS_EXIT(zfsvfs);
4549 return (SET_ERROR(ENXIO));
4550 }
4551
4552 ZFS_EXIT(zfsvfs);
4553 return (0);
4554 }
4555
4556 /*
4557 * convoff - converts the given flock64_t (l_start, l_whence) to be
4558 * relative to the given whence (0=SEEK_SET, 1=SEEK_CUR, 2=SEEK_END).
4559 */
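/*
 * For example, converting a SEEK_CUR (1) lock with l_start == 100 at
 * file offset 4096 to SEEK_SET (0) yields l_start == 4196 and
 * l_whence == 0.
 */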
4560 int
4561 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4562 {
4563 vattr_t vap;
4564 int error;
4565
4566 if ((lckdat->l_whence == 2) || (whence == 2)) {
4567 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
4568 return (error);
4569 }
4570
4571 switch (lckdat->l_whence) {
4572 case 1:
4573 lckdat->l_start += offset;
4574 break;
4575 case 2:
4576 lckdat->l_start += vap.va_size;
4577 /* FALLTHRU */
4578 case 0:
4579 break;
4580 default:
4581 return (SET_ERROR(EINVAL));
4582 }
4583
4584 if (lckdat->l_start < 0)
4585 return (SET_ERROR(EINVAL));
4586
4587 switch (whence) {
4588 case 1:
4589 lckdat->l_start -= offset;
4590 break;
4591 case 2:
4592 lckdat->l_start -= vap.va_size;
4593 /* FALLTHRU */
4594 case 0:
4595 break;
4596 default:
4597 return (SET_ERROR(EINVAL));
4598 }
4599
4600 lckdat->l_whence = (short)whence;
4601 return (0);
4602 }
4603
4604 /*
4605 * Free or allocate space in a file. Currently, this function only
4606 * supports the `F_FREESP' command. However, this command is somewhat
4607 * misnamed, as its functionality includes the ability to allocate as
4608 * well as free space.
4609 *
4610 * IN: ip - inode of file to free data in.
4611 * cmd - action to take (only F_FREESP supported).
4612 * bfp - section of file to free/alloc.
4613 * flag - current file open mode flags.
4614 * offset - current file offset.
4615 * cr - credentials of caller [UNUSED].
4616 *
4617 * RETURN: 0 on success, error code on failure.
4618 *
4619 * Timestamps:
4620 * ip - ctime|mtime updated
4621 */
4622 /* ARGSUSED */
4623 int
4624 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4625 offset_t offset, cred_t *cr)
4626 {
4627 znode_t *zp = ITOZ(ip);
4628 zfsvfs_t *zfsvfs = ITOZSB(ip);
4629 uint64_t off, len;
4630 int error;
4631
4632 ZFS_ENTER(zfsvfs);
4633 ZFS_VERIFY_ZP(zp);
4634
4635 if (cmd != F_FREESP) {
4636 ZFS_EXIT(zfsvfs);
4637 return (SET_ERROR(EINVAL));
4638 }
4639
4640 /*
4641 * Callers might not be able to detect properly that we are read-only,
4642 * so check it explicitly here.
4643 */
4644 if (zfs_is_readonly(zfsvfs)) {
4645 ZFS_EXIT(zfsvfs);
4646 return (SET_ERROR(EROFS));
4647 }
4648
4649 if ((error = convoff(ip, bfp, 0, offset))) {
4650 ZFS_EXIT(zfsvfs);
4651 return (error);
4652 }
4653
4654 if (bfp->l_len < 0) {
4655 ZFS_EXIT(zfsvfs);
4656 return (SET_ERROR(EINVAL));
4657 }
4658
4659 /*
4660 * Permissions aren't checked on Solaris because on this OS
4661 * zfs_space() can only be called with an opened file handle.
4662 * On Linux we can get here through truncate_range() which
4663 * operates directly on inodes, so we need to check access rights.
4664 */
4665 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4666 ZFS_EXIT(zfsvfs);
4667 return (error);
4668 }
4669
4670 off = bfp->l_start;
4671 len = bfp->l_len; /* 0 means from off to end of file */
4672
4673 error = zfs_freesp(zp, off, len, flag, TRUE);
4674
4675 ZFS_EXIT(zfsvfs);
4676 return (error);
4677 }
4678
4679 /*ARGSUSED*/
4680 int
4681 zfs_fid(struct inode *ip, fid_t *fidp)
4682 {
4683 znode_t *zp = ITOZ(ip);
4684 zfsvfs_t *zfsvfs = ITOZSB(ip);
4685 uint32_t gen;
4686 uint64_t gen64;
4687 uint64_t object = zp->z_id;
4688 zfid_short_t *zfid;
4689 int size, i, error;
4690
4691 ZFS_ENTER(zfsvfs);
4692 ZFS_VERIFY_ZP(zp);
4693
4694 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4695 &gen64, sizeof (uint64_t))) != 0) {
4696 ZFS_EXIT(zfsvfs);
4697 return (error);
4698 }
4699
4700 gen = (uint32_t)gen64;
4701
4702 size = SHORT_FID_LEN;
4703
4704 zfid = (zfid_short_t *)fidp;
4705
4706 zfid->zf_len = size;
4707
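	/* Pack the object number into the fid, least-significant byte first. */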
4708 for (i = 0; i < sizeof (zfid->zf_object); i++)
4709 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4710
4711 /* Must have a non-zero generation number to distinguish from .zfs */
4712 if (gen == 0)
4713 gen = 1;
4714 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4715 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4716
4717 ZFS_EXIT(zfsvfs);
4718 return (0);
4719 }
4720
4721 /*ARGSUSED*/
4722 int
4723 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4724 {
4725 znode_t *zp = ITOZ(ip);
4726 zfsvfs_t *zfsvfs = ITOZSB(ip);
4727 int error;
4728 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4729
4730 ZFS_ENTER(zfsvfs);
4731 ZFS_VERIFY_ZP(zp);
4732 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4733 ZFS_EXIT(zfsvfs);
4734
4735 return (error);
4736 }
4737
4738 /*ARGSUSED*/
4739 int
4740 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4741 {
4742 znode_t *zp = ITOZ(ip);
4743 zfsvfs_t *zfsvfs = ITOZSB(ip);
4744 int error;
4745 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4746 zilog_t *zilog = zfsvfs->z_log;
4747
4748 ZFS_ENTER(zfsvfs);
4749 ZFS_VERIFY_ZP(zp);
4750
4751 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4752
4753 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4754 zil_commit(zilog, 0);
4755
4756 ZFS_EXIT(zfsvfs);
4757 return (error);
4758 }
4759
4760 #ifdef HAVE_UIO_ZEROCOPY
4761 /*
4762 * Tunables, both must be a power of 2.
4763 *
4764 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4765 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4766 * an arcbuf for a partial block read
4767 */
4768 int zcr_blksz_min = (1 << 10); /* 1K */
4769 int zcr_blksz_max = (1 << 17); /* 128K */
4770
4771 /*ARGSUSED*/
4772 static int
4773 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4774 {
4775 znode_t *zp = ITOZ(ip);
4776 zfsvfs_t *zfsvfs = ITOZSB(ip);
4777 int max_blksz = zfsvfs->z_max_blksz;
4778 uio_t *uio = &xuio->xu_uio;
4779 ssize_t size = uio->uio_resid;
4780 offset_t offset = uio->uio_loffset;
4781 int blksz;
4782 int fullblk, i;
4783 arc_buf_t *abuf;
4784 ssize_t maxsize;
4785 int preamble, postamble;
4786
4787 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4788 return (SET_ERROR(EINVAL));
4789
4790 ZFS_ENTER(zfsvfs);
4791 ZFS_VERIFY_ZP(zp);
4792 switch (ioflag) {
4793 case UIO_WRITE:
4794 /*
4795 * Loan out an arc_buf for write if write size is bigger than
4796 * max_blksz, and the file's block size is also max_blksz.
4797 */
4798 blksz = max_blksz;
4799 if (size < blksz || zp->z_blksz != blksz) {
4800 ZFS_EXIT(zfsvfs);
4801 return (SET_ERROR(EINVAL));
4802 }
4803 /*
4804 * Caller requests buffers for write before knowing where the
4805 * write offset might be (e.g. NFS TCP write).
4806 */
4807 if (offset == -1) {
4808 preamble = 0;
4809 } else {
4810 preamble = P2PHASE(offset, blksz);
4811 if (preamble) {
4812 preamble = blksz - preamble;
4813 size -= preamble;
4814 }
4815 }
4816
4817 postamble = P2PHASE(size, blksz);
4818 size -= postamble;
4819
4820 fullblk = size / blksz;
4821 (void) dmu_xuio_init(xuio,
4822 (preamble != 0) + fullblk + (postamble != 0));
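		/*
		 * For example, with blksz == 128K, offset == 96K and an
		 * original size of 512K: preamble == 32K (to reach the next
		 * block boundary), postamble == 96K, and fullblk == 3, so
		 * five arc_bufs are requested in total.
		 */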
4823
4824 /*
4825 * Have to fix iov base/len for partial buffers. They
4826 * currently represent full arc_buf's.
4827 */
4828 if (preamble) {
4829 /* data begins in the middle of the arc_buf */
4830 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4831 blksz);
4832 ASSERT(abuf);
4833 (void) dmu_xuio_add(xuio, abuf,
4834 blksz - preamble, preamble);
4835 }
4836
4837 for (i = 0; i < fullblk; i++) {
4838 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4839 blksz);
4840 ASSERT(abuf);
4841 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4842 }
4843
4844 if (postamble) {
4845 /* data ends in the middle of the arc_buf */
4846 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4847 blksz);
4848 ASSERT(abuf);
4849 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4850 }
4851 break;
4852 case UIO_READ:
4853 /*
4854 * Loan out an arc_buf for read if the read size is larger than
4855 * the current file block size. Block alignment is not
4856 * considered. Partial arc_buf will be loaned out for read.
4857 */
4858 blksz = zp->z_blksz;
4859 if (blksz < zcr_blksz_min)
4860 blksz = zcr_blksz_min;
4861 if (blksz > zcr_blksz_max)
4862 blksz = zcr_blksz_max;
4863 /* avoid the potential complexity of a block size larger than max_blksz */
4864 if (blksz > max_blksz) {
4865 ZFS_EXIT(zfsvfs);
4866 return (SET_ERROR(EINVAL));
4867 }
4868
4869 maxsize = zp->z_size - uio->uio_loffset;
4870 if (size > maxsize)
4871 size = maxsize;
4872
4873 if (size < blksz) {
4874 ZFS_EXIT(zfsvfs);
4875 return (SET_ERROR(EINVAL));
4876 }
4877 break;
4878 default:
4879 ZFS_EXIT(zfsvfs);
4880 return (SET_ERROR(EINVAL));
4881 }
4882
4883 uio->uio_extflg = UIO_XUIO;
4884 XUIO_XUZC_RW(xuio) = ioflag;
4885 ZFS_EXIT(zfsvfs);
4886 return (0);
4887 }
4888
4889 /*ARGSUSED*/
4890 static int
4891 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4892 {
4893 int i;
4894 arc_buf_t *abuf;
4895 int ioflag = XUIO_XUZC_RW(xuio);
4896
4897 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4898
4899 i = dmu_xuio_cnt(xuio);
4900 while (i-- > 0) {
4901 abuf = dmu_xuio_arcbuf(xuio, i);
4902 /*
4903 * if abuf == NULL, it must be a write buffer
4904 * that has been returned in zfs_write().
4905 */
4906 if (abuf)
4907 dmu_return_arcbuf(abuf);
4908 ASSERT(abuf || ioflag == UIO_WRITE);
4909 }
4910
4911 dmu_xuio_fini(xuio);
4912 return (0);
4913 }
4914 #endif /* HAVE_UIO_ZEROCOPY */
4915
4916 #if defined(_KERNEL) && defined(HAVE_SPL)
4917 EXPORT_SYMBOL(zfs_open);
4918 EXPORT_SYMBOL(zfs_close);
4919 EXPORT_SYMBOL(zfs_read);
4920 EXPORT_SYMBOL(zfs_write);
4921 EXPORT_SYMBOL(zfs_access);
4922 EXPORT_SYMBOL(zfs_lookup);
4923 EXPORT_SYMBOL(zfs_create);
4924 EXPORT_SYMBOL(zfs_tmpfile);
4925 EXPORT_SYMBOL(zfs_remove);
4926 EXPORT_SYMBOL(zfs_mkdir);
4927 EXPORT_SYMBOL(zfs_rmdir);
4928 EXPORT_SYMBOL(zfs_readdir);
4929 EXPORT_SYMBOL(zfs_fsync);
4930 EXPORT_SYMBOL(zfs_getattr);
4931 EXPORT_SYMBOL(zfs_getattr_fast);
4932 EXPORT_SYMBOL(zfs_setattr);
4933 EXPORT_SYMBOL(zfs_rename);
4934 EXPORT_SYMBOL(zfs_symlink);
4935 EXPORT_SYMBOL(zfs_readlink);
4936 EXPORT_SYMBOL(zfs_link);
4937 EXPORT_SYMBOL(zfs_inactive);
4938 EXPORT_SYMBOL(zfs_space);
4939 EXPORT_SYMBOL(zfs_fid);
4940 EXPORT_SYMBOL(zfs_getsecattr);
4941 EXPORT_SYMBOL(zfs_setsecattr);
4942 EXPORT_SYMBOL(zfs_getpage);
4943 EXPORT_SYMBOL(zfs_putpage);
4944 EXPORT_SYMBOL(zfs_dirty_inode);
4945 EXPORT_SYMBOL(zfs_map);
4946
4947 /* CSTYLED */
4948 module_param(zfs_delete_blocks, ulong, 0644);
4949 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
4950 module_param(zfs_read_chunk_size, long, 0644);
4951 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
4952 #endif