1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc.
27 */
28
29 /* Portions Copyright 2007 Jeremy Teo */
30 /* Portions Copyright 2010 Robert Milkowski */
31
32
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/resource.h>
39 #include <sys/vfs.h>
40 #include <sys/vfs_opreg.h>
41 #include <sys/file.h>
42 #include <sys/stat.h>
43 #include <sys/kmem.h>
44 #include <sys/taskq.h>
45 #include <sys/uio.h>
46 #include <sys/vmsystm.h>
47 #include <sys/atomic.h>
48 #include <vm/pvn.h>
49 #include <sys/pathname.h>
50 #include <sys/cmn_err.h>
51 #include <sys/errno.h>
52 #include <sys/unistd.h>
53 #include <sys/zfs_dir.h>
54 #include <sys/zfs_acl.h>
55 #include <sys/zfs_ioctl.h>
56 #include <sys/fs/zfs.h>
57 #include <sys/dmu.h>
58 #include <sys/dmu_objset.h>
59 #include <sys/spa.h>
60 #include <sys/txg.h>
61 #include <sys/dbuf.h>
62 #include <sys/zap.h>
63 #include <sys/sa.h>
64 #include <sys/dirent.h>
65 #include <sys/policy.h>
66 #include <sys/sunddi.h>
67 #include <sys/sid.h>
68 #include <sys/mode.h>
69 #include "fs/fs_subr.h"
70 #include <sys/zfs_ctldir.h>
71 #include <sys/zfs_fuid.h>
72 #include <sys/zfs_sa.h>
73 #include <sys/zfs_vnops.h>
74 #include <sys/dnlc.h>
75 #include <sys/zfs_rlock.h>
76 #include <sys/extdirent.h>
77 #include <sys/kidmap.h>
78 #include <sys/cred.h>
79 #include <sys/attr.h>
80 #include <sys/zpl.h>
81
82 /*
83 * Programming rules.
84 *
85 * Each vnode op performs some logical unit of work. To do this, the ZPL must
86 * properly lock its in-core state, create a DMU transaction, do the work,
87 * record this work in the intent log (ZIL), commit the DMU transaction,
88 * and wait for the intent log to commit if it is a synchronous operation.
89 * Moreover, the vnode ops must work in both normal and log replay context.
90 * The ordering of events is important to avoid deadlocks and references
91 * to freed memory. The example below illustrates the following Big Rules:
92 *
93 * (1) A check must be made in each zfs thread for a mounted file system.
94 * This is done, avoiding races, by using ZFS_ENTER(zfsvfs).
95 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
96 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
97 * can cause the calling function to return EIO.
98 *
99 * (2) iput() should always be the last thing except for zil_commit()
100 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
101 * First, if it's the last reference, the vnode/znode
102 * can be freed, so the zp may point to freed memory. Second, the last
103 * reference will call zfs_zinactive(), which may induce a lot of work --
104 * pushing cached pages (which acquires range locks) and syncing out
105 * cached atime changes. Third, zfs_zinactive() may require a new tx,
106 * which could deadlock the system if you were already holding one.
107 * If you must call iput() within a tx then use zfs_iput_async().
108 *
109 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
110 * as they can span dmu_tx_assign() calls.
111 *
112 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
113 * dmu_tx_assign(). This is critical because we don't want to block
114 * while holding locks.
115 *
116 * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
117 * reduces lock contention and CPU usage when we must wait (note that if
118 * throughput is constrained by the storage, nearly every transaction
119 * must wait).
120 *
121 * Note, in particular, that if a lock is sometimes acquired before
122 * the tx assigns, and sometimes after (e.g. z_lock), then failing
123 * to use a non-blocking assign can deadlock the system. The scenario:
124 *
125 * Thread A has grabbed a lock before calling dmu_tx_assign().
126 * Thread B is in an already-assigned tx, and blocks for this lock.
127 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
128 * forever, because the previous txg can't quiesce until B's tx commits.
129 *
130 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
131 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
132 * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
133 * to indicate that this operation has already called dmu_tx_wait().
134 * This will ensure that we don't retry forever, waiting a short bit
135 * each time.
136 *
137 * (5) If the operation succeeded, generate the intent log entry for it
138 * before dropping locks. This ensures that the ordering of events
139 * in the intent log matches the order in which they actually occurred.
140 * During ZIL replay the zfs_log_* functions will update the sequence
141 * number to indicate the zil transaction has replayed.
142 *
143 * (6) At the end of each vnode op, the DMU tx must always commit,
144 * regardless of whether there were any errors.
145 *
146 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
147 * to ensure that synchronous semantics are provided when necessary.
148 *
149 * In general, this is how things should be ordered in each vnode op:
150 *
151 * ZFS_ENTER(zfsvfs); // exit if unmounted
152 * top:
153 * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
154 * rw_enter(...); // grab any other locks you need
155 * tx = dmu_tx_create(...); // get DMU tx
156 * dmu_tx_hold_*(); // hold each object you might modify
157 * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
158 * if (error) {
159 * rw_exit(...); // drop locks
160 * zfs_dirent_unlock(dl); // unlock directory entry
161 * iput(...); // release held vnodes
162 * if (error == ERESTART) {
163 * waited = B_TRUE;
164 * dmu_tx_wait(tx);
165 * dmu_tx_abort(tx);
166 * goto top;
167 * }
168 * dmu_tx_abort(tx); // abort DMU tx
169 * ZFS_EXIT(zfsvfs); // finished in zfs
170 * return (error); // really out of space
171 * }
172 * error = do_real_work(); // do whatever this VOP does
173 * if (error == 0)
174 * zfs_log_*(...); // on success, make ZIL entry
175 * dmu_tx_commit(tx); // commit DMU tx -- error or not
176 * rw_exit(...); // drop locks
177 * zfs_dirent_unlock(dl); // unlock directory entry
178 * iput(...); // release held vnodes
179 * zil_commit(zilog, foid); // synchronous when necessary
180 * ZFS_EXIT(zfsvfs); // finished in zfs
181 * return (error); // done, report error
182 */
183
184 /*
185 * Virus scanning is unsupported. It would be possible to add a hook
186 * here to perform the required virus scan. This could be done
187 * entirely in the kernel or potentially as an update to invoke a
188 * scanning utility.
189 */
190 static int
191 zfs_vscan(struct inode *ip, cred_t *cr, int async)
192 {
193 return (0);
194 }
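
/*
 * A minimal sketch of what such a hook might look like if wired up as a
 * userspace upcall via call_usermodehelper(). Illustrative only -- the
 * helper path and argument convention here are hypothetical, not an
 * existing interface:
 *
 *	static int
 *	zfs_vscan(struct inode *ip, cred_t *cr, int async)
 *	{
 *		char obj[32];
 *		char *argv[] = { "/usr/sbin/zfs-vscan", obj, NULL };
 *		char *envp[] = { NULL };
 *
 *		snprintf(obj, sizeof (obj), "%llu",
 *		    (u_longlong_t)ITOZ(ip)->z_id);
 *		return (call_usermodehelper(argv[0], argv, envp,
 *		    async ? UMH_NO_WAIT : UMH_WAIT_PROC));
 *	}
 */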
195
196 /* ARGSUSED */
197 int
198 zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
199 {
200 znode_t *zp = ITOZ(ip);
201 zfsvfs_t *zfsvfs = ITOZSB(ip);
202
203 ZFS_ENTER(zfsvfs);
204 ZFS_VERIFY_ZP(zp);
205
206 /* Honor ZFS_APPENDONLY file attribute */
207 if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
208 ((flag & O_APPEND) == 0)) {
209 ZFS_EXIT(zfsvfs);
210 return (SET_ERROR(EPERM));
211 }
212
213 /* Virus scan eligible files on open */
214 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
215 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
216 if (zfs_vscan(ip, cr, 0) != 0) {
217 ZFS_EXIT(zfsvfs);
218 return (SET_ERROR(EACCES));
219 }
220 }
221
222 /* Keep a count of the synchronous opens in the znode */
223 if (flag & O_SYNC)
224 atomic_inc_32(&zp->z_sync_cnt);
225
226 ZFS_EXIT(zfsvfs);
227 return (0);
228 }
229
230 /* ARGSUSED */
231 int
232 zfs_close(struct inode *ip, int flag, cred_t *cr)
233 {
234 znode_t *zp = ITOZ(ip);
235 zfsvfs_t *zfsvfs = ITOZSB(ip);
236
237 ZFS_ENTER(zfsvfs);
238 ZFS_VERIFY_ZP(zp);
239
240 /* Decrement the synchronous opens in the znode */
241 if (flag & O_SYNC)
242 atomic_dec_32(&zp->z_sync_cnt);
243
244 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
245 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
246 VERIFY(zfs_vscan(ip, cr, 1) == 0);
247
248 ZFS_EXIT(zfsvfs);
249 return (0);
250 }
251
252 #if defined(SEEK_HOLE) && defined(SEEK_DATA)
253 /*
254 * Lseek support for finding holes (cmd == SEEK_HOLE) and
255 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
256 */
257 static int
258 zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
259 {
260 znode_t *zp = ITOZ(ip);
261 uint64_t noff = (uint64_t)*off; /* new offset */
262 uint64_t file_sz;
263 int error;
264 boolean_t hole;
265
266 file_sz = zp->z_size;
267 if (noff >= file_sz) {
268 return (SET_ERROR(ENXIO));
269 }
270
271 if (cmd == SEEK_HOLE)
272 hole = B_TRUE;
273 else
274 hole = B_FALSE;
275
276 error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
277
278 if (error == ESRCH)
279 return (SET_ERROR(ENXIO));
280
281 /* file was dirty, so fall back to using generic logic */
282 if (error == EBUSY) {
283 if (hole)
284 *off = file_sz;
285
286 return (0);
287 }
288
289 /*
290 * We could find a hole that begins after the logical end-of-file,
291 * because dmu_offset_next() only works on whole blocks. If the
292 * EOF falls mid-block, then indicate that the "virtual hole"
293 * at the end of the file begins at the logical EOF, rather than
294 * at the end of the last block.
295 */
296 if (noff > file_sz) {
297 ASSERT(hole);
298 noff = file_sz;
299 }
300
301 if (noff < *off)
302 return (error);
303 *off = noff;
304 return (error);
305 }
306
307 int
308 zfs_holey(struct inode *ip, int cmd, loff_t *off)
309 {
310 znode_t *zp = ITOZ(ip);
311 zfsvfs_t *zfsvfs = ITOZSB(ip);
312 int error;
313
314 ZFS_ENTER(zfsvfs);
315 ZFS_VERIFY_ZP(zp);
316
317 error = zfs_holey_common(ip, cmd, off);
318
319 ZFS_EXIT(zfsvfs);
320 return (error);
321 }
322 #endif /* SEEK_HOLE && SEEK_DATA */
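
/*
 * For reference, a minimal userspace sketch of how the code above is
 * reached via lseek(2):
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *	off_t data = lseek(fd, hole, SEEK_DATA);
 *
 * For a fully-written file, SEEK_HOLE reports the "virtual hole" at EOF
 * handled in zfs_holey_common(), and seeking for data at or beyond EOF
 * fails with ENXIO.
 */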
323
324 #if defined(_KERNEL)
325 /*
326 * When a file is memory mapped, we must keep the IO data synchronized
327 * between the DMU cache and the memory mapped pages. What this means:
328 *
329 * On Write: If we find a memory mapped page, we write to *both*
330 * the page and the dmu buffer.
331 */
332 static void
333 update_pages(struct inode *ip, int64_t start, int len,
334 objset_t *os, uint64_t oid)
335 {
336 struct address_space *mp = ip->i_mapping;
337 struct page *pp;
338 uint64_t nbytes;
339 int64_t off;
340 void *pb;
341
342 off = start & (PAGE_SIZE-1);
343 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
344 nbytes = MIN(PAGE_SIZE - off, len);
345
346 pp = find_lock_page(mp, start >> PAGE_SHIFT);
347 if (pp) {
348 if (mapping_writably_mapped(mp))
349 flush_dcache_page(pp);
350
351 pb = kmap(pp);
352 (void) dmu_read(os, oid, start+off, nbytes, pb+off,
353 DMU_READ_PREFETCH);
354 kunmap(pp);
355
356 if (mapping_writably_mapped(mp))
357 flush_dcache_page(pp);
358
359 mark_page_accessed(pp);
360 SetPageUptodate(pp);
361 ClearPageError(pp);
362 unlock_page(pp);
363 put_page(pp);
364 }
365
366 len -= nbytes;
367 off = 0;
368 }
369 }
370
371 /*
372 * When a file is memory mapped, we must keep the IO data synchronized
373 * between the DMU cache and the memory mapped pages. What this means:
374 *
375 * On Read: We "read" preferentially from memory mapped pages,
376 * otherwise we fall back to the dmu buffer.
377 *
378 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
379 * the file is memory mapped.
380 */
381 static int
382 mappedread(struct inode *ip, int nbytes, uio_t *uio)
383 {
384 struct address_space *mp = ip->i_mapping;
385 struct page *pp;
386 znode_t *zp = ITOZ(ip);
387 int64_t start, off;
388 uint64_t bytes;
389 int len = nbytes;
390 int error = 0;
391 void *pb;
392
393 start = uio->uio_loffset;
394 off = start & (PAGE_SIZE-1);
395 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
396 bytes = MIN(PAGE_SIZE - off, len);
397
398 pp = find_lock_page(mp, start >> PAGE_SHIFT);
399 if (pp) {
400 ASSERT(PageUptodate(pp));
401 unlock_page(pp);
402
403 pb = kmap(pp);
404 error = uiomove(pb + off, bytes, UIO_READ, uio);
405 kunmap(pp);
406
407 if (mapping_writably_mapped(mp))
408 flush_dcache_page(pp);
409
410 mark_page_accessed(pp);
411 put_page(pp);
412 } else {
413 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
414 uio, bytes);
415 }
416
417 len -= bytes;
418 off = 0;
419 if (error)
420 break;
421 }
422 return (error);
423 }
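
/*
 * Worked example of the chunking above, assuming 4 KiB pages: a read of
 * 10000 bytes at offset 5000 gives off = 904 and start = 4096, and is
 * satisfied in three iterations of 3192, 4096, and 2712 bytes against
 * page indexes 1, 2, and 3.
 */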
424 #endif /* _KERNEL */
425
426 unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
427 unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
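
/*
 * Assuming the usual module_param() export of these tunables, both can
 * be inspected and adjusted at runtime, e.g.:
 *
 *	# cat /sys/module/zfs/parameters/zfs_read_chunk_size
 *	# echo 2097152 > /sys/module/zfs/parameters/zfs_read_chunk_size
 */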
428
429 /*
430 * Read bytes from specified file into supplied buffer.
431 *
432 * IN: ip - inode of file to be read from.
433 * uio - structure supplying read location, range info,
434 * and return buffer.
435 * ioflag - FSYNC flags; used to provide FRSYNC semantics.
436 * O_DIRECT flag; used to bypass page cache.
437 * cr - credentials of caller.
438 *
439 * OUT: uio - updated offset and range, buffer filled.
440 *
441 * RETURN: 0 on success, error code on failure.
442 *
443 * Side Effects:
444 * inode - atime updated if byte count > 0
445 */
446 /* ARGSUSED */
447 int
448 zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
449 {
450 znode_t *zp = ITOZ(ip);
451 zfsvfs_t *zfsvfs = ITOZSB(ip);
452 ssize_t n, nbytes;
453 int error = 0;
454 rl_t *rl;
455 #ifdef HAVE_UIO_ZEROCOPY
456 xuio_t *xuio = NULL;
457 #endif /* HAVE_UIO_ZEROCOPY */
458
459 ZFS_ENTER(zfsvfs);
460 ZFS_VERIFY_ZP(zp);
461
462 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
463 ZFS_EXIT(zfsvfs);
464 return (SET_ERROR(EACCES));
465 }
466
467 /*
468 * Validate file offset
469 */
470 if (uio->uio_loffset < (offset_t)0) {
471 ZFS_EXIT(zfsvfs);
472 return (SET_ERROR(EINVAL));
473 }
474
475 /*
476 * Fasttrack empty reads
477 */
478 if (uio->uio_resid == 0) {
479 ZFS_EXIT(zfsvfs);
480 return (0);
481 }
482
483 /*
484 * If we're in FRSYNC mode, sync out this znode before reading it.
485 * Only do this for non-snapshots.
486 */
487 if (zfsvfs->z_log &&
488 (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
489 zil_commit(zfsvfs->z_log, zp->z_id);
490
491 /*
492 * Lock the range against changes.
493 */
494 rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
495 RL_READER);
496
497 /*
498 * If we are reading past end-of-file we can skip
499 * to the end; but we might still need to set atime.
500 */
501 if (uio->uio_loffset >= zp->z_size) {
502 error = 0;
503 goto out;
504 }
505
506 ASSERT(uio->uio_loffset < zp->z_size);
507 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
508
509 #ifdef HAVE_UIO_ZEROCOPY
510 if ((uio->uio_extflg == UIO_XUIO) &&
511 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
512 int nblk;
513 int blksz = zp->z_blksz;
514 uint64_t offset = uio->uio_loffset;
515
516 xuio = (xuio_t *)uio;
517 if ((ISP2(blksz))) {
518 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
519 blksz)) / blksz;
520 } else {
521 ASSERT(offset + n <= blksz);
522 nblk = 1;
523 }
524 (void) dmu_xuio_init(xuio, nblk);
525
526 if (vn_has_cached_data(ip)) {
527 /*
528 * For simplicity, we always allocate a full buffer
529 * even if we only expect to read a portion of a block.
530 */
531 while (--nblk >= 0) {
532 (void) dmu_xuio_add(xuio,
533 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
534 blksz), 0, blksz);
535 }
536 }
537 }
538 #endif /* HAVE_UIO_ZEROCOPY */
539
540 while (n > 0) {
541 nbytes = MIN(n, zfs_read_chunk_size -
542 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
543
544 if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
545 error = mappedread(ip, nbytes, uio);
546 } else {
547 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
548 uio, nbytes);
549 }
550
551 if (error) {
552 /* convert checksum errors into IO errors */
553 if (error == ECKSUM)
554 error = SET_ERROR(EIO);
555 break;
556 }
557
558 n -= nbytes;
559 }
560 out:
561 zfs_range_unlock(rl);
562
563 ZFS_EXIT(zfsvfs);
564 return (error);
565 }
566
567 /*
568 * Write the bytes to a file.
569 *
570 * IN: ip - inode of file to be written to.
571 * uio - structure supplying write location, range info,
572 * and data buffer.
573 * ioflag - FAPPEND flag set if in append mode.
574 * O_DIRECT flag; used to bypass page cache.
575 * cr - credentials of caller.
576 *
577 * OUT: uio - updated offset and range.
578 *
579 * RETURN: 0 if success
580 * error code if failure
581 *
582 * Timestamps:
583 * ip - ctime|mtime updated if byte count > 0
584 */
585
586 /* ARGSUSED */
587 int
588 zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
589 {
590 znode_t *zp = ITOZ(ip);
591 rlim64_t limit = uio->uio_limit;
592 ssize_t start_resid = uio->uio_resid;
593 ssize_t tx_bytes;
594 uint64_t end_size;
595 dmu_tx_t *tx;
596 zfsvfs_t *zfsvfs = ZTOZSB(zp);
597 zilog_t *zilog;
598 offset_t woff;
599 ssize_t n, nbytes;
600 rl_t *rl;
601 int max_blksz = zfsvfs->z_max_blksz;
602 int error = 0;
603 arc_buf_t *abuf;
604 const iovec_t *aiov = NULL;
605 xuio_t *xuio = NULL;
606 int write_eof;
607 int count = 0;
608 sa_bulk_attr_t bulk[4];
609 uint64_t mtime[2], ctime[2];
610 uint32_t uid;
611 #ifdef HAVE_UIO_ZEROCOPY
612 int i_iov = 0;
613 const iovec_t *iovp = uio->uio_iov;
614 ASSERTV(int iovcnt = uio->uio_iovcnt);
615 #endif
616
617 /*
618 * Fasttrack empty write
619 */
620 n = start_resid;
621 if (n == 0)
622 return (0);
623
624 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
625 limit = MAXOFFSET_T;
626
627 ZFS_ENTER(zfsvfs);
628 ZFS_VERIFY_ZP(zp);
629
630 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
631 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
632 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
633 &zp->z_size, 8);
634 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
635 &zp->z_pflags, 8);
636
637 /*
638 * Callers might not be able to detect properly that we are read-only,
639 * so check it explicitly here.
640 */
641 if (zfs_is_readonly(zfsvfs)) {
642 ZFS_EXIT(zfsvfs);
643 return (SET_ERROR(EROFS));
644 }
645
646 /*
647 * If immutable or not appending then return EPERM
648 */
649 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
650 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
651 (uio->uio_loffset < zp->z_size))) {
652 ZFS_EXIT(zfsvfs);
653 return (SET_ERROR(EPERM));
654 }
655
656 zilog = zfsvfs->z_log;
657
658 /*
659 * Validate file offset
660 */
661 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
662 if (woff < 0) {
663 ZFS_EXIT(zfsvfs);
664 return (SET_ERROR(EINVAL));
665 }
666
667 /*
668 * Pre-fault the pages to ensure slow (e.g. NFS) pages
669 * don't hold up txg.
670 * Skip this if uio contains loaned arc_buf.
671 */
672 #ifdef HAVE_UIO_ZEROCOPY
673 if ((uio->uio_extflg == UIO_XUIO) &&
674 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
675 xuio = (xuio_t *)uio;
676 else
677 #endif
678 uio_prefaultpages(MIN(n, max_blksz), uio);
679
680 /*
681 * If in append mode, set the io offset pointer to eof.
682 */
683 if (ioflag & FAPPEND) {
684 /*
685 * Obtain an appending range lock to guarantee file append
686 * semantics. We reset the write offset once we have the lock.
687 */
688 rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
689 woff = rl->r_off;
690 if (rl->r_len == UINT64_MAX) {
691 /*
692 * We overlocked the file because this write will cause
693 * the file block size to increase.
694 * Note that zp_size cannot change with this lock held.
695 */
696 woff = zp->z_size;
697 }
698 uio->uio_loffset = woff;
699 } else {
700 /*
701 * Note that if the file block size will change as a result of
702 * this write, then this range lock will lock the entire file
703 * so that we can re-write the block safely.
704 */
705 rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);
706 }
707
708 if (woff >= limit) {
709 zfs_range_unlock(rl);
710 ZFS_EXIT(zfsvfs);
711 return (SET_ERROR(EFBIG));
712 }
713
714 if ((woff + n) > limit || woff > (limit - n))
715 n = limit - woff;
716
717 /* Will this write extend the file length? */
718 write_eof = (woff + n > zp->z_size);
719
720 end_size = MAX(zp->z_size, woff + n);
721
722 /*
723 * Write the file in reasonable size chunks. Each chunk is written
724 * in a separate transaction; this keeps the intent log records small
725 * and allows us to do more fine-grained space accounting.
726 */
727 while (n > 0) {
728 abuf = NULL;
729 woff = uio->uio_loffset;
730 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
731 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
732 if (abuf != NULL)
733 dmu_return_arcbuf(abuf);
734 error = SET_ERROR(EDQUOT);
735 break;
736 }
737
738 if (xuio && abuf == NULL) {
739 #ifdef HAVE_UIO_ZEROCOPY
740 ASSERT(i_iov < iovcnt);
741 ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
742 aiov = &iovp[i_iov];
743 abuf = dmu_xuio_arcbuf(xuio, i_iov);
744 dmu_xuio_clear(xuio, i_iov);
745 ASSERT((aiov->iov_base == abuf->b_data) ||
746 ((char *)aiov->iov_base - (char *)abuf->b_data +
747 aiov->iov_len == arc_buf_size(abuf)));
748 i_iov++;
749 #endif
750 } else if (abuf == NULL && n >= max_blksz &&
751 woff >= zp->z_size &&
752 P2PHASE(woff, max_blksz) == 0 &&
753 zp->z_blksz == max_blksz) {
754 /*
755 * This write covers a full block. "Borrow" a buffer
756 * from the dmu so that we can fill it before we enter
757 * a transaction. This avoids the possibility of
758 * holding up the transaction if the data copy hangs
759 * up on a pagefault (e.g., from an NFS server mapping).
760 */
761 size_t cbytes;
762
763 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
764 max_blksz);
765 ASSERT(abuf != NULL);
766 ASSERT(arc_buf_size(abuf) == max_blksz);
767 if ((error = uiocopy(abuf->b_data, max_blksz,
768 UIO_WRITE, uio, &cbytes))) {
769 dmu_return_arcbuf(abuf);
770 break;
771 }
772 ASSERT(cbytes == max_blksz);
773 }
774
775 /*
776 * Start a transaction.
777 */
778 tx = dmu_tx_create(zfsvfs->z_os);
779 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
780 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
781 zfs_sa_upgrade_txholds(tx, zp);
782 error = dmu_tx_assign(tx, TXG_WAIT);
783 if (error) {
784 dmu_tx_abort(tx);
785 if (abuf != NULL)
786 dmu_return_arcbuf(abuf);
787 break;
788 }
789
790 /*
791 * If zfs_range_lock() over-locked, we grow the blocksize
792 * and then reduce the lock range. This will only happen
793 * on the first iteration since zfs_range_reduce() will
794 * shrink down r_len to the appropriate size.
795 */
796 if (rl->r_len == UINT64_MAX) {
797 uint64_t new_blksz;
798
799 if (zp->z_blksz > max_blksz) {
800 /*
801 * File's blocksize is already larger than the
802 * "recordsize" property. Only let it grow to
803 * the next power of 2.
804 */
805 ASSERT(!ISP2(zp->z_blksz));
806 new_blksz = MIN(end_size,
807 1 << highbit64(zp->z_blksz));
808 } else {
809 new_blksz = MIN(end_size, max_blksz);
810 }
811 zfs_grow_blocksize(zp, new_blksz, tx);
812 zfs_range_reduce(rl, woff, n);
813 }
814
815 /*
816 * XXX - should we really limit each write to z_max_blksz?
817 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
818 */
819 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
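/*
 * For example, with max_blksz = 128K and woff = 100000,
 * P2PHASE(woff, max_blksz) = 100000, so nbytes = 31072: just
 * enough to reach the next block boundary. Later iterations
 * then proceed in full 128K chunks.
 */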
820
821 if (abuf == NULL) {
822 tx_bytes = uio->uio_resid;
823 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
824 uio, nbytes, tx);
825 tx_bytes -= uio->uio_resid;
826 } else {
827 tx_bytes = nbytes;
828 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
829 /*
830 * If this is not a full block write, but we are
831 * extending the file past EOF and this data starts
832 * block-aligned, use assign_arcbuf(). Otherwise,
833 * write via dmu_write().
834 */
835 if (tx_bytes < max_blksz && (!write_eof ||
836 aiov->iov_base != abuf->b_data)) {
837 ASSERT(xuio);
838 dmu_write(zfsvfs->z_os, zp->z_id, woff,
839 /* cppcheck-suppress nullPointer */
840 aiov->iov_len, aiov->iov_base, tx);
841 dmu_return_arcbuf(abuf);
842 xuio_stat_wbuf_copied();
843 } else {
844 ASSERT(xuio || tx_bytes == max_blksz);
845 dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
846 woff, abuf, tx);
847 }
848 ASSERT(tx_bytes <= uio->uio_resid);
849 uioskip(uio, tx_bytes);
850 }
851 if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
852 update_pages(ip, woff,
853 tx_bytes, zfsvfs->z_os, zp->z_id);
854 }
855
856 /*
857 * If we made no progress, we're done. If we made even
858 * partial progress, update the znode and ZIL accordingly.
859 */
860 if (tx_bytes == 0) {
861 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
862 (void *)&zp->z_size, sizeof (uint64_t), tx);
863 dmu_tx_commit(tx);
864 ASSERT(error != 0);
865 break;
866 }
867
868 /*
869 * Clear Set-UID/Set-GID bits on successful write if not
870 * privileged and at least one of the execute bits is set.
871 *
872 * It would be nice to do this after all writes have
873 * been done, but that would still expose the ISUID/ISGID
874 * to another app after the partial write is committed.
875 *
876 * Note: we don't call zfs_fuid_map_id() here because
877 * user 0 is not an ephemeral uid.
878 */
879 mutex_enter(&zp->z_acl_lock);
880 uid = KUID_TO_SUID(ip->i_uid);
881 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
882 (S_IXUSR >> 6))) != 0 &&
883 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
884 secpolicy_vnode_setid_retain(cr,
885 ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
886 uint64_t newmode;
887 zp->z_mode &= ~(S_ISUID | S_ISGID);
888 ip->i_mode = newmode = zp->z_mode;
889 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
890 (void *)&newmode, sizeof (uint64_t), tx);
891 }
892 mutex_exit(&zp->z_acl_lock);
893
894 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
895
896 /*
897 * Update the file size (zp_size) if it has changed;
898 * account for possible concurrent updates.
899 */
900 while ((end_size = zp->z_size) < uio->uio_loffset) {
901 (void) atomic_cas_64(&zp->z_size, end_size,
902 uio->uio_loffset);
903 ASSERT(error == 0);
904 }
905 /*
906 * If we are replaying and eof is non-zero then force
907 * the file size to the specified eof. Note, there's no
908 * concurrency during replay.
909 */
910 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
911 zp->z_size = zfsvfs->z_replay_eof;
912
913 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
914
915 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
916 NULL, NULL);
917 dmu_tx_commit(tx);
918
919 if (error != 0)
920 break;
921 ASSERT(tx_bytes == nbytes);
922 n -= nbytes;
923
924 if (!xuio && n > 0)
925 uio_prefaultpages(MIN(n, max_blksz), uio);
926 }
927
928 zfs_inode_update(zp);
929 zfs_range_unlock(rl);
930
931 /*
932 * If we're in replay mode, or we made no progress, return error.
933 * Otherwise, it's at least a partial write, so it's successful.
934 */
935 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
936 ZFS_EXIT(zfsvfs);
937 return (error);
938 }
939
940 if (ioflag & (FSYNC | FDSYNC) ||
941 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
942 zil_commit(zilog, zp->z_id);
943
944 ZFS_EXIT(zfsvfs);
945 return (0);
946 }
947
948 /*
949 * Drop a reference on the passed inode asynchronously. This ensures
950 * that the caller will never drop the last reference on an inode in
951 * the current context. Doing so while holding open a tx could result
952 * in a deadlock if iput_final() re-enters the filesystem code.
953 */
954 void
955 zfs_iput_async(struct inode *ip)
956 {
957 objset_t *os = ITOZSB(ip)->z_os;
958
959 ASSERT(atomic_read(&ip->i_count) > 0);
960 ASSERT(os != NULL);
961
962 if (atomic_read(&ip->i_count) == 1)
963 VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
964 (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
965 else
966 iput(ip);
967 }
968
969 void
970 zfs_get_done(zgd_t *zgd, int error)
971 {
972 znode_t *zp = zgd->zgd_private;
973
974 if (zgd->zgd_db)
975 dmu_buf_rele(zgd->zgd_db, zgd);
976
977 zfs_range_unlock(zgd->zgd_rl);
978
979 /*
980 * Release the vnode asynchronously as we currently have the
981 * txg stopped from syncing.
982 */
983 zfs_iput_async(ZTOI(zp));
984
985 if (error == 0 && zgd->zgd_bp)
986 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
987
988 kmem_free(zgd, sizeof (zgd_t));
989 }
990
991 #ifdef DEBUG
992 static int zil_fault_io = 0;
993 #endif
994
995 /*
996 * Get data to generate a TX_WRITE intent log record.
997 */
998 int
999 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1000 {
1001 zfsvfs_t *zfsvfs = arg;
1002 objset_t *os = zfsvfs->z_os;
1003 znode_t *zp;
1004 uint64_t object = lr->lr_foid;
1005 uint64_t offset = lr->lr_offset;
1006 uint64_t size = lr->lr_length;
1007 dmu_buf_t *db;
1008 zgd_t *zgd;
1009 int error = 0;
1010
1011 ASSERT(zio != NULL);
1012 ASSERT(size != 0);
1013
1014 /*
1015 * Nothing to do if the file has been removed
1016 */
1017 if (zfs_zget(zfsvfs, object, &zp) != 0)
1018 return (SET_ERROR(ENOENT));
1019 if (zp->z_unlinked) {
1020 /*
1021 * Release the vnode asynchronously as we currently have the
1022 * txg stopped from syncing.
1023 */
1024 zfs_iput_async(ZTOI(zp));
1025 return (SET_ERROR(ENOENT));
1026 }
1027
1028 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1029 zgd->zgd_zilog = zfsvfs->z_log;
1030 zgd->zgd_private = zp;
1031
1032 /*
1033 * Write records come in two flavors: immediate and indirect.
1034 * For small writes it's cheaper to store the data with the
1035 * log record (immediate); for large writes it's cheaper to
1036 * sync the data and get a pointer to it (indirect) so that
1037 * we don't have to write the data twice.
1038 */
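/*
 * Below, buf != NULL selects the immediate path: the data is copied
 * into the log record itself. buf == NULL selects the indirect path,
 * where dmu_sync() writes the block and the resulting block pointer
 * is recorded in lr_blkptr.
 */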
1039 if (buf != NULL) { /* immediate write */
1040 zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,
1041 RL_READER);
1042 /* test for truncation needs to be done while range locked */
1043 if (offset >= zp->z_size) {
1044 error = SET_ERROR(ENOENT);
1045 } else {
1046 error = dmu_read(os, object, offset, size, buf,
1047 DMU_READ_NO_PREFETCH);
1048 }
1049 ASSERT(error == 0 || error == ENOENT);
1050 } else { /* indirect write */
1051 /*
1052 * Have to lock the whole block to ensure when it's
1053 * written out and its checksum is being calculated
1054 * that no one can change the data. We need to re-check
1055 * blocksize after we get the lock in case it's changed!
1056 */
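/*
 * Worked example: with offset = 200000 and z_blksz = 128K, blkoff =
 * P2PHASE(200000, 131072) = 68928, so the lock covers the whole
 * 128K block [131072, 262144) containing the write.
 */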
1057 for (;;) {
1058 uint64_t blkoff;
1059 size = zp->z_blksz;
1060 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1061 offset -= blkoff;
1062 zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
1063 size, RL_READER);
1064 if (zp->z_blksz == size)
1065 break;
1066 offset += blkoff;
1067 zfs_range_unlock(zgd->zgd_rl);
1068 }
1069 /* test for truncation needs to be done while range locked */
1070 if (lr->lr_offset >= zp->z_size)
1071 error = SET_ERROR(ENOENT);
1072 #ifdef DEBUG
1073 if (zil_fault_io) {
1074 error = SET_ERROR(EIO);
1075 zil_fault_io = 0;
1076 }
1077 #endif
1078 if (error == 0)
1079 error = dmu_buf_hold(os, object, offset, zgd, &db,
1080 DMU_READ_NO_PREFETCH);
1081
1082 if (error == 0) {
1083 blkptr_t *bp = &lr->lr_blkptr;
1084
1085 zgd->zgd_db = db;
1086 zgd->zgd_bp = bp;
1087
1088 ASSERT(db->db_offset == offset);
1089 ASSERT(db->db_size == size);
1090
1091 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1092 zfs_get_done, zgd);
1093 ASSERT(error || lr->lr_length <= size);
1094
1095 /*
1096 * On success, we need to wait for the write I/O
1097 * initiated by dmu_sync() to complete before we can
1098 * release this dbuf. We will finish everything up
1099 * in the zfs_get_done() callback.
1100 */
1101 if (error == 0)
1102 return (0);
1103
1104 if (error == EALREADY) {
1105 lr->lr_common.lrc_txtype = TX_WRITE2;
1106 error = 0;
1107 }
1108 }
1109 }
1110
1111 zfs_get_done(zgd, error);
1112
1113 return (error);
1114 }
1115
1116 /*ARGSUSED*/
1117 int
1118 zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
1119 {
1120 znode_t *zp = ITOZ(ip);
1121 zfsvfs_t *zfsvfs = ITOZSB(ip);
1122 int error;
1123
1124 ZFS_ENTER(zfsvfs);
1125 ZFS_VERIFY_ZP(zp);
1126
1127 if (flag & V_ACE_MASK)
1128 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1129 else
1130 error = zfs_zaccess_rwx(zp, mode, flag, cr);
1131
1132 ZFS_EXIT(zfsvfs);
1133 return (error);
1134 }
1135
1136 /*
1137 * Lookup an entry in a directory, or an extended attribute directory.
1138 * If it exists, return a held inode reference for it.
1139 *
1140 * IN: dip - inode of directory to search.
1141 * nm - name of entry to lookup.
1142 * flags - LOOKUP_XATTR set if looking for an attribute.
1143 * cr - credentials of caller.
1144 * direntflags - directory lookup flags
1145 * realpnp - returned pathname.
1146 *
1147 * OUT: ipp - inode of located entry, NULL if not found.
1148 *
1149 * RETURN: 0 on success, error code on failure.
1150 *
1151 * Timestamps:
1152 * NA
1153 */
1154 /* ARGSUSED */
1155 int
1156 zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
1157 cred_t *cr, int *direntflags, pathname_t *realpnp)
1158 {
1159 znode_t *zdp = ITOZ(dip);
1160 zfsvfs_t *zfsvfs = ITOZSB(dip);
1161 int error = 0;
1162
1163 /*
1164 * Fast path lookup, however we must skip DNLC lookup
1165 * for case folding or normalizing lookups because the
1166 * DNLC code only stores the passed-in name. This means
1167 * creating 'a' and removing 'A' on a case-insensitive
1168 * file system would work, but DNLC still thinks 'a'
1169 * exists and won't let you create it again on the next
1170 * pass through fast path.
1171 */
1172 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1173
1174 if (!S_ISDIR(dip->i_mode)) {
1175 return (SET_ERROR(ENOTDIR));
1176 } else if (zdp->z_sa_hdl == NULL) {
1177 return (SET_ERROR(EIO));
1178 }
1179
1180 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1181 error = zfs_fastaccesschk_execute(zdp, cr);
1182 if (!error) {
1183 *ipp = dip;
1184 igrab(*ipp);
1185 return (0);
1186 }
1187 return (error);
1188 #ifdef HAVE_DNLC
1189 } else if (!zdp->z_zfsvfs->z_norm &&
1190 (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {
1191
1192 vnode_t *tvp = dnlc_lookup(dvp, nm);
1193
1194 if (tvp) {
1195 error = zfs_fastaccesschk_execute(zdp, cr);
1196 if (error) {
1197 iput(tvp);
1198 return (error);
1199 }
1200 if (tvp == DNLC_NO_VNODE) {
1201 iput(tvp);
1202 return (SET_ERROR(ENOENT));
1203 } else {
1204 *vpp = tvp;
1205 return (specvp_check(vpp, cr));
1206 }
1207 }
1208 #endif /* HAVE_DNLC */
1209 }
1210 }
1211
1212 ZFS_ENTER(zfsvfs);
1213 ZFS_VERIFY_ZP(zdp);
1214
1215 *ipp = NULL;
1216
1217 if (flags & LOOKUP_XATTR) {
1218 /*
1219 * We don't allow recursive attributes.
1220 * Maybe someday we will.
1221 */
1222 if (zdp->z_pflags & ZFS_XATTR) {
1223 ZFS_EXIT(zfsvfs);
1224 return (SET_ERROR(EINVAL));
1225 }
1226
1227 if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
1228 ZFS_EXIT(zfsvfs);
1229 return (error);
1230 }
1231
1232 /*
1233 * Do we have permission to get into attribute directory?
1234 */
1235
1236 if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
1237 B_FALSE, cr))) {
1238 iput(*ipp);
1239 *ipp = NULL;
1240 }
1241
1242 ZFS_EXIT(zfsvfs);
1243 return (error);
1244 }
1245
1246 if (!S_ISDIR(dip->i_mode)) {
1247 ZFS_EXIT(zfsvfs);
1248 return (SET_ERROR(ENOTDIR));
1249 }
1250
1251 /*
1252 * Check accessibility of directory.
1253 */
1254
1255 if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
1256 ZFS_EXIT(zfsvfs);
1257 return (error);
1258 }
1259
1260 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1261 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1262 ZFS_EXIT(zfsvfs);
1263 return (SET_ERROR(EILSEQ));
1264 }
1265
1266 error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
1267 if ((error == 0) && (*ipp))
1268 zfs_inode_update(ITOZ(*ipp));
1269
1270 ZFS_EXIT(zfsvfs);
1271 return (error);
1272 }
1273
1274 /*
1275 * Attempt to create a new entry in a directory. If the entry
1276 * already exists, truncate the file if permissible, else return
1277 * an error. Return the ip of the created or trunc'd file.
1278 *
1279 * IN: dip - inode of directory to put new file entry in.
1280 * name - name of new file entry.
1281 * vap - attributes of new file.
1282 * excl - flag indicating exclusive or non-exclusive mode.
1283 * mode - mode to open file with.
1284 * cr - credentials of caller.
1285 * flag - large file flag [UNUSED].
1286 * vsecp - ACL to be set
1287 *
1288 * OUT: ipp - inode of created or trunc'd entry.
1289 *
1290 * RETURN: 0 on success, error code on failure.
1291 *
1292 * Timestamps:
1293 * dip - ctime|mtime updated if new entry created
1294 * ip - ctime|mtime always, atime if new
1295 */
1296
1297 /* ARGSUSED */
1298 int
1299 zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
1300 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1301 {
1302 znode_t *zp, *dzp = ITOZ(dip);
1303 zfsvfs_t *zfsvfs = ITOZSB(dip);
1304 zilog_t *zilog;
1305 objset_t *os;
1306 zfs_dirlock_t *dl;
1307 dmu_tx_t *tx;
1308 int error;
1309 uid_t uid;
1310 gid_t gid;
1311 zfs_acl_ids_t acl_ids;
1312 boolean_t fuid_dirtied;
1313 boolean_t have_acl = B_FALSE;
1314 boolean_t waited = B_FALSE;
1315
1316 /*
1317 * If we have an ephemeral id, ACL, or XVATTR then
1318 * make sure file system is at proper version
1319 */
1320
1321 gid = crgetgid(cr);
1322 uid = crgetuid(cr);
1323
1324 if (zfsvfs->z_use_fuids == B_FALSE &&
1325 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1326 return (SET_ERROR(EINVAL));
1327
1328 if (name == NULL)
1329 return (SET_ERROR(EINVAL));
1330
1331 ZFS_ENTER(zfsvfs);
1332 ZFS_VERIFY_ZP(dzp);
1333 os = zfsvfs->z_os;
1334 zilog = zfsvfs->z_log;
1335
1336 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1337 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1338 ZFS_EXIT(zfsvfs);
1339 return (SET_ERROR(EILSEQ));
1340 }
1341
1342 if (vap->va_mask & ATTR_XVATTR) {
1343 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1344 crgetuid(cr), cr, vap->va_mode)) != 0) {
1345 ZFS_EXIT(zfsvfs);
1346 return (error);
1347 }
1348 }
1349
1350 top:
1351 *ipp = NULL;
1352 if (*name == '\0') {
1353 /*
1354 * Null component name refers to the directory itself.
1355 */
1356 igrab(dip);
1357 zp = dzp;
1358 dl = NULL;
1359 error = 0;
1360 } else {
1361 /* possible igrab(zp) */
1362 int zflg = 0;
1363
1364 if (flag & FIGNORECASE)
1365 zflg |= ZCILOOK;
1366
1367 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1368 NULL, NULL);
1369 if (error) {
1370 if (have_acl)
1371 zfs_acl_ids_free(&acl_ids);
1372 if (strcmp(name, "..") == 0)
1373 error = SET_ERROR(EISDIR);
1374 ZFS_EXIT(zfsvfs);
1375 return (error);
1376 }
1377 }
1378
1379 if (zp == NULL) {
1380 uint64_t txtype;
1381
1382 /*
1383 * Create a new file object and update the directory
1384 * to reference it.
1385 */
1386 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1387 if (have_acl)
1388 zfs_acl_ids_free(&acl_ids);
1389 goto out;
1390 }
1391
1392 /*
1393 * We only support the creation of regular files in
1394 * extended attribute directories.
1395 */
1396
1397 if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
1398 if (have_acl)
1399 zfs_acl_ids_free(&acl_ids);
1400 error = SET_ERROR(EINVAL);
1401 goto out;
1402 }
1403
1404 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1405 cr, vsecp, &acl_ids)) != 0)
1406 goto out;
1407 have_acl = B_TRUE;
1408
1409 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1410 zfs_acl_ids_free(&acl_ids);
1411 error = SET_ERROR(EDQUOT);
1412 goto out;
1413 }
1414
1415 tx = dmu_tx_create(os);
1416
1417 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1418 ZFS_SA_BASE_ATTR_SIZE);
1419
1420 fuid_dirtied = zfsvfs->z_fuid_dirty;
1421 if (fuid_dirtied)
1422 zfs_fuid_txhold(zfsvfs, tx);
1423 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1424 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1425 if (!zfsvfs->z_use_sa &&
1426 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1427 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1428 0, acl_ids.z_aclp->z_acl_bytes);
1429 }
1430
1431 error = dmu_tx_assign(tx,
1432 (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1433 if (error) {
1434 zfs_dirent_unlock(dl);
1435 if (error == ERESTART) {
1436 waited = B_TRUE;
1437 dmu_tx_wait(tx);
1438 dmu_tx_abort(tx);
1439 goto top;
1440 }
1441 zfs_acl_ids_free(&acl_ids);
1442 dmu_tx_abort(tx);
1443 ZFS_EXIT(zfsvfs);
1444 return (error);
1445 }
1446 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1447
1448 error = zfs_link_create(dl, zp, tx, ZNEW);
1449 if (error != 0) {
1450 /*
1451 * Since we failed to add the directory entry for it,
1452 * delete the newly created dnode.
1453 */
1454 zfs_znode_delete(zp, tx);
1455 remove_inode_hash(ZTOI(zp));
1456 zfs_acl_ids_free(&acl_ids);
1457 dmu_tx_commit(tx);
1458 goto out;
1459 }
1460
1461 if (fuid_dirtied)
1462 zfs_fuid_sync(zfsvfs, tx);
1463
1464 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1465 if (flag & FIGNORECASE)
1466 txtype |= TX_CI;
1467 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1468 vsecp, acl_ids.z_fuidp, vap);
1469 zfs_acl_ids_free(&acl_ids);
1470 dmu_tx_commit(tx);
1471 } else {
1472 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1473
1474 if (have_acl)
1475 zfs_acl_ids_free(&acl_ids);
1476 have_acl = B_FALSE;
1477
1478 /*
1479 * A directory entry already exists for this name.
1480 */
1481 /*
1482 * Can't truncate an existing file if in exclusive mode.
1483 */
1484 if (excl) {
1485 error = SET_ERROR(EEXIST);
1486 goto out;
1487 }
1488 /*
1489 * Can't open a directory for writing.
1490 */
1491 if (S_ISDIR(ZTOI(zp)->i_mode)) {
1492 error = SET_ERROR(EISDIR);
1493 goto out;
1494 }
1495 /*
1496 * Verify requested access to file.
1497 */
1498 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1499 goto out;
1500 }
1501
1502 mutex_enter(&dzp->z_lock);
1503 dzp->z_seq++;
1504 mutex_exit(&dzp->z_lock);
1505
1506 /*
1507 * Truncate regular files if requested.
1508 */
1509 if (S_ISREG(ZTOI(zp)->i_mode) &&
1510 (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
1511 /* we can't hold any locks when calling zfs_freesp() */
1512 if (dl) {
1513 zfs_dirent_unlock(dl);
1514 dl = NULL;
1515 }
1516 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1517 }
1518 }
1519 out:
1520
1521 if (dl)
1522 zfs_dirent_unlock(dl);
1523
1524 if (error) {
1525 if (zp)
1526 iput(ZTOI(zp));
1527 } else {
1528 zfs_inode_update(dzp);
1529 zfs_inode_update(zp);
1530 *ipp = ZTOI(zp);
1531 }
1532
1533 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1534 zil_commit(zilog, 0);
1535
1536 ZFS_EXIT(zfsvfs);
1537 return (error);
1538 }
1539
1540 /* ARGSUSED */
1541 int
1542 zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
1543 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1544 {
1545 znode_t *zp = NULL, *dzp = ITOZ(dip);
1546 zfsvfs_t *zfsvfs = ITOZSB(dip);
1547 objset_t *os;
1548 dmu_tx_t *tx;
1549 int error;
1550 uid_t uid;
1551 gid_t gid;
1552 zfs_acl_ids_t acl_ids;
1553 boolean_t fuid_dirtied;
1554 boolean_t have_acl = B_FALSE;
1555 boolean_t waited = B_FALSE;
1556
1557 /*
1558 * If we have an ephemeral id, ACL, or XVATTR then
1559 * make sure file system is at proper version
1560 */
1561
1562 gid = crgetgid(cr);
1563 uid = crgetuid(cr);
1564
1565 if (zfsvfs->z_use_fuids == B_FALSE &&
1566 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1567 return (SET_ERROR(EINVAL));
1568
1569 ZFS_ENTER(zfsvfs);
1570 ZFS_VERIFY_ZP(dzp);
1571 os = zfsvfs->z_os;
1572
1573 if (vap->va_mask & ATTR_XVATTR) {
1574 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1575 crgetuid(cr), cr, vap->va_mode)) != 0) {
1576 ZFS_EXIT(zfsvfs);
1577 return (error);
1578 }
1579 }
1580
1581 top:
1582 *ipp = NULL;
1583
1584 /*
1585 * Create a new file object and update the directory
1586 * to reference it.
1587 */
1588 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1589 if (have_acl)
1590 zfs_acl_ids_free(&acl_ids);
1591 goto out;
1592 }
1593
1594 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1595 cr, vsecp, &acl_ids)) != 0)
1596 goto out;
1597 have_acl = B_TRUE;
1598
1599 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1600 zfs_acl_ids_free(&acl_ids);
1601 error = SET_ERROR(EDQUOT);
1602 goto out;
1603 }
1604
1605 tx = dmu_tx_create(os);
1606
1607 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1608 ZFS_SA_BASE_ATTR_SIZE);
1609 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1610
1611 fuid_dirtied = zfsvfs->z_fuid_dirty;
1612 if (fuid_dirtied)
1613 zfs_fuid_txhold(zfsvfs, tx);
1614 if (!zfsvfs->z_use_sa &&
1615 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1616 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1617 0, acl_ids.z_aclp->z_acl_bytes);
1618 }
1619 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1620 if (error) {
1621 if (error == ERESTART) {
1622 waited = B_TRUE;
1623 dmu_tx_wait(tx);
1624 dmu_tx_abort(tx);
1625 goto top;
1626 }
1627 zfs_acl_ids_free(&acl_ids);
1628 dmu_tx_abort(tx);
1629 ZFS_EXIT(zfsvfs);
1630 return (error);
1631 }
1632 zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
1633
1634 if (fuid_dirtied)
1635 zfs_fuid_sync(zfsvfs, tx);
1636
1637 /* Add to unlinked set */
1638 zp->z_unlinked = 1;
1639 zfs_unlinked_add(zp, tx);
1640 zfs_acl_ids_free(&acl_ids);
1641 dmu_tx_commit(tx);
1642 out:
1643
1644 if (error) {
1645 if (zp)
1646 iput(ZTOI(zp));
1647 } else {
1648 zfs_inode_update(dzp);
1649 zfs_inode_update(zp);
1650 *ipp = ZTOI(zp);
1651 }
1652
1653 ZFS_EXIT(zfsvfs);
1654 return (error);
1655 }
1656
1657 /*
1658 * Remove an entry from a directory.
1659 *
1660 * IN: dip - inode of directory to remove entry from.
1661 * name - name of entry to remove.
1662 * cr - credentials of caller.
1663 *
1664 * RETURN: 0 if success
1665 * error code if failure
1666 *
1667 * Timestamps:
1668 * dip - ctime|mtime
1669 * ip - ctime (if nlink > 0)
1670 */
1671
1672 uint64_t null_xattr = 0;
1673
1674 /*ARGSUSED*/
1675 int
1676 zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
1677 {
1678 znode_t *zp, *dzp = ITOZ(dip);
1679 znode_t *xzp;
1680 struct inode *ip;
1681 zfsvfs_t *zfsvfs = ITOZSB(dip);
1682 zilog_t *zilog;
1683 uint64_t acl_obj, xattr_obj;
1684 uint64_t xattr_obj_unlinked = 0;
1685 uint64_t obj = 0;
1686 uint64_t links;
1687 zfs_dirlock_t *dl;
1688 dmu_tx_t *tx;
1689 boolean_t may_delete_now, delete_now = FALSE;
1690 boolean_t unlinked, toobig = FALSE;
1691 uint64_t txtype;
1692 pathname_t *realnmp = NULL;
1693 pathname_t realnm;
1694 int error;
1695 int zflg = ZEXISTS;
1696 boolean_t waited = B_FALSE;
1697
1698 if (name == NULL)
1699 return (SET_ERROR(EINVAL));
1700
1701 ZFS_ENTER(zfsvfs);
1702 ZFS_VERIFY_ZP(dzp);
1703 zilog = zfsvfs->z_log;
1704
1705 if (flags & FIGNORECASE) {
1706 zflg |= ZCILOOK;
1707 pn_alloc(&realnm);
1708 realnmp = &realnm;
1709 }
1710
1711 top:
1712 xattr_obj = 0;
1713 xzp = NULL;
1714 /*
1715 * Attempt to lock directory; fail if entry doesn't exist.
1716 */
1717 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1718 NULL, realnmp))) {
1719 if (realnmp)
1720 pn_free(realnmp);
1721 ZFS_EXIT(zfsvfs);
1722 return (error);
1723 }
1724
1725 ip = ZTOI(zp);
1726
1727 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1728 goto out;
1729 }
1730
1731 /*
1732 * Need to use rmdir for removing directories.
1733 */
1734 if (S_ISDIR(ip->i_mode)) {
1735 error = SET_ERROR(EPERM);
1736 goto out;
1737 }
1738
1739 #ifdef HAVE_DNLC
1740 if (realnmp)
1741 dnlc_remove(dvp, realnmp->pn_buf);
1742 else
1743 dnlc_remove(dvp, name);
1744 #endif /* HAVE_DNLC */
1745
1746 mutex_enter(&zp->z_lock);
1747 may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
1748 mutex_exit(&zp->z_lock);
1749
1750 /*
1751 * We may delete the znode now, or we may put it in the unlinked set;
1752 * it depends on whether we're the last link, and on whether there are
1753 * other holds on the inode. So we dmu_tx_hold() the right things to
1754 * allow for either case.
1755 */
1756 obj = zp->z_id;
1757 tx = dmu_tx_create(zfsvfs->z_os);
1758 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1759 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1760 zfs_sa_upgrade_txholds(tx, zp);
1761 zfs_sa_upgrade_txholds(tx, dzp);
1762 if (may_delete_now) {
1763 toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
1764 /* if the file is too big, only hold_free a token amount */
1765 dmu_tx_hold_free(tx, zp->z_id, 0,
1766 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1767 }
1768
1769 /* are there any extended attributes? */
1770 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1771 &xattr_obj, sizeof (xattr_obj));
1772 if (error == 0 && xattr_obj) {
1773 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1774 ASSERT0(error);
1775 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1776 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1777 }
1778
1779 mutex_enter(&zp->z_lock);
1780 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1781 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1782 mutex_exit(&zp->z_lock);
1783
1784 /* charge as an update -- would be nice not to charge at all */
1785 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1786
1787 /*
1788 * Mark this transaction as typically resulting in a net free of space
1789 */
1790 dmu_tx_mark_netfree(tx);
1791
1792 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1793 if (error) {
1794 zfs_dirent_unlock(dl);
1795 if (error == ERESTART) {
1796 waited = B_TRUE;
1797 dmu_tx_wait(tx);
1798 dmu_tx_abort(tx);
1799 iput(ip);
1800 if (xzp)
1801 iput(ZTOI(xzp));
1802 goto top;
1803 }
1804 if (realnmp)
1805 pn_free(realnmp);
1806 dmu_tx_abort(tx);
1807 iput(ip);
1808 if (xzp)
1809 iput(ZTOI(xzp));
1810 ZFS_EXIT(zfsvfs);
1811 return (error);
1812 }
1813
1814 /*
1815 * Remove the directory entry.
1816 */
1817 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1818
1819 if (error) {
1820 dmu_tx_commit(tx);
1821 goto out;
1822 }
1823
1824 if (unlinked) {
1825 /*
1826 * Hold z_lock so that we can make sure that the ACL obj
1827 * hasn't changed. Could have been deleted due to
1828 * zfs_sa_upgrade().
1829 */
1830 mutex_enter(&zp->z_lock);
1831 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1832 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1833 delete_now = may_delete_now && !toobig &&
1834 atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
1835 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1836 acl_obj;
1837 }
1838
1839 if (delete_now) {
1840 if (xattr_obj_unlinked) {
1841 ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
1842 mutex_enter(&xzp->z_lock);
1843 xzp->z_unlinked = 1;
1844 clear_nlink(ZTOI(xzp));
1845 links = 0;
1846 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1847 &links, sizeof (links), tx);
1848 ASSERT3U(error, ==, 0);
1849 mutex_exit(&xzp->z_lock);
1850 zfs_unlinked_add(xzp, tx);
1851
1852 if (zp->z_is_sa)
1853 error = sa_remove(zp->z_sa_hdl,
1854 SA_ZPL_XATTR(zfsvfs), tx);
1855 else
1856 error = sa_update(zp->z_sa_hdl,
1857 SA_ZPL_XATTR(zfsvfs), &null_xattr,
1858 sizeof (uint64_t), tx);
1859 ASSERT0(error);
1860 }
1861 /*
1862 * Add to the unlinked set because a new reference could be
1863 * taken concurrently resulting in a deferred destruction.
1864 */
1865 zfs_unlinked_add(zp, tx);
1866 mutex_exit(&zp->z_lock);
1867 } else if (unlinked) {
1868 mutex_exit(&zp->z_lock);
1869 zfs_unlinked_add(zp, tx);
1870 }
1871
1872 txtype = TX_REMOVE;
1873 if (flags & FIGNORECASE)
1874 txtype |= TX_CI;
1875 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1876
1877 dmu_tx_commit(tx);
1878 out:
1879 if (realnmp)
1880 pn_free(realnmp);
1881
1882 zfs_dirent_unlock(dl);
1883 zfs_inode_update(dzp);
1884 zfs_inode_update(zp);
1885
1886 if (delete_now)
1887 iput(ip);
1888 else
1889 zfs_iput_async(ip);
1890
1891 if (xzp) {
1892 zfs_inode_update(xzp);
1893 zfs_iput_async(ZTOI(xzp));
1894 }
1895
1896 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1897 zil_commit(zilog, 0);
1898
1899 ZFS_EXIT(zfsvfs);
1900 return (error);
1901 }
1902
1903 /*
1904 * Create a new directory and insert it into dip using the name
1905 * provided. Return a pointer to the inserted directory.
1906 *
1907 * IN: dip - inode of directory to add subdir to.
1908 * dirname - name of new directory.
1909 * vap - attributes of new directory.
1910 * cr - credentials of caller.
1911 * vsecp - ACL to be set
1912 *
1913 * OUT: ipp - inode of created directory.
1914 *
1915 * RETURN: 0 if success
1916 * error code if failure
1917 *
1918 * Timestamps:
1919 * dip - ctime|mtime updated
1920 * ipp - ctime|mtime|atime updated
1921 */
1922 /*ARGSUSED*/
1923 int
1924 zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1925 cred_t *cr, int flags, vsecattr_t *vsecp)
1926 {
1927 znode_t *zp, *dzp = ITOZ(dip);
1928 zfsvfs_t *zfsvfs = ITOZSB(dip);
1929 zilog_t *zilog;
1930 zfs_dirlock_t *dl;
1931 uint64_t txtype;
1932 dmu_tx_t *tx;
1933 int error;
1934 int zf = ZNEW;
1935 uid_t uid;
1936 gid_t gid = crgetgid(cr);
1937 zfs_acl_ids_t acl_ids;
1938 boolean_t fuid_dirtied;
1939 boolean_t waited = B_FALSE;
1940
1941 ASSERT(S_ISDIR(vap->va_mode));
1942
1943 /*
1944 * If we have an ephemeral id, ACL, or XVATTR then
1945 * make sure file system is at proper version
1946 */
1947
1948 uid = crgetuid(cr);
1949 if (zfsvfs->z_use_fuids == B_FALSE &&
1950 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1951 return (SET_ERROR(EINVAL));
1952
1953 if (dirname == NULL)
1954 return (SET_ERROR(EINVAL));
1955
1956 ZFS_ENTER(zfsvfs);
1957 ZFS_VERIFY_ZP(dzp);
1958 zilog = zfsvfs->z_log;
1959
1960 if (dzp->z_pflags & ZFS_XATTR) {
1961 ZFS_EXIT(zfsvfs);
1962 return (SET_ERROR(EINVAL));
1963 }
1964
1965 if (zfsvfs->z_utf8 && u8_validate(dirname,
1966 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1967 ZFS_EXIT(zfsvfs);
1968 return (SET_ERROR(EILSEQ));
1969 }
1970 if (flags & FIGNORECASE)
1971 zf |= ZCILOOK;
1972
1973 if (vap->va_mask & ATTR_XVATTR) {
1974 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1975 crgetuid(cr), cr, vap->va_mode)) != 0) {
1976 ZFS_EXIT(zfsvfs);
1977 return (error);
1978 }
1979 }
1980
1981 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1982 vsecp, &acl_ids)) != 0) {
1983 ZFS_EXIT(zfsvfs);
1984 return (error);
1985 }
1986 /*
1987 * First make sure the new directory doesn't exist.
1988 *
1989 * Existence is checked first to make sure we don't return
1990 * EACCES instead of EEXIST which can cause some applications
1991 * to fail.
1992 */
1993 top:
1994 *ipp = NULL;
1995
1996 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1997 NULL, NULL))) {
1998 zfs_acl_ids_free(&acl_ids);
1999 ZFS_EXIT(zfsvfs);
2000 return (error);
2001 }
2002
2003 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
2004 zfs_acl_ids_free(&acl_ids);
2005 zfs_dirent_unlock(dl);
2006 ZFS_EXIT(zfsvfs);
2007 return (error);
2008 }
2009
2010 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
2011 zfs_acl_ids_free(&acl_ids);
2012 zfs_dirent_unlock(dl);
2013 ZFS_EXIT(zfsvfs);
2014 return (SET_ERROR(EDQUOT));
2015 }
2016
2017 /*
2018 * Add a new entry to the directory.
2019 */
2020 tx = dmu_tx_create(zfsvfs->z_os);
2021 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2022 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2023 fuid_dirtied = zfsvfs->z_fuid_dirty;
2024 if (fuid_dirtied)
2025 zfs_fuid_txhold(zfsvfs, tx);
2026 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2027 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2028 acl_ids.z_aclp->z_acl_bytes);
2029 }
2030
2031 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2032 ZFS_SA_BASE_ATTR_SIZE);
2033
2034 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2035 if (error) {
2036 zfs_dirent_unlock(dl);
2037 if (error == ERESTART) {
2038 waited = B_TRUE;
2039 dmu_tx_wait(tx);
2040 dmu_tx_abort(tx);
2041 goto top;
2042 }
2043 zfs_acl_ids_free(&acl_ids);
2044 dmu_tx_abort(tx);
2045 ZFS_EXIT(zfsvfs);
2046 return (error);
2047 }
2048
2049 /*
2050 * Create new node.
2051 */
2052 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2053
2054 /*
2055 * Now put new name in parent dir.
2056 */
2057 error = zfs_link_create(dl, zp, tx, ZNEW);
2058 if (error != 0) {
2059 zfs_znode_delete(zp, tx);
2060 remove_inode_hash(ZTOI(zp));
2061 goto out;
2062 }
2063
2064 if (fuid_dirtied)
2065 zfs_fuid_sync(zfsvfs, tx);
2066
2067 *ipp = ZTOI(zp);
2068
2069 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2070 if (flags & FIGNORECASE)
2071 txtype |= TX_CI;
2072 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2073 acl_ids.z_fuidp, vap);
2074
2075 out:
2076 zfs_acl_ids_free(&acl_ids);
2077
2078 dmu_tx_commit(tx);
2079
2080 zfs_dirent_unlock(dl);
2081
2082 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2083 zil_commit(zilog, 0);
2084
2085 if (error != 0) {
2086 iput(ZTOI(zp));
2087 } else {
2088 zfs_inode_update(dzp);
2089 zfs_inode_update(zp);
2090 }
2091 ZFS_EXIT(zfsvfs);
2092 return (error);
2093 }
2094
2095 /*
2096 * Remove a subdirectory entry. If the current working
2097 * directory is the same as the subdir to be removed, the
2098 * remove will fail.
2099 *
2100 * IN: dip - inode of directory to remove from.
2101 * name - name of directory to be removed.
2102 * cwd - inode of current working directory.
2103 * cr - credentials of caller.
2104 * flags - case flags
2105 *
2106 * RETURN: 0 on success, error code on failure.
2107 *
2108 * Timestamps:
2109 * dip - ctime|mtime updated
2110 */
2111 /*ARGSUSED*/
2112 int
2113 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2114 int flags)
2115 {
2116 znode_t *dzp = ITOZ(dip);
2117 znode_t *zp;
2118 struct inode *ip;
2119 zfsvfs_t *zfsvfs = ITOZSB(dip);
2120 zilog_t *zilog;
2121 zfs_dirlock_t *dl;
2122 dmu_tx_t *tx;
2123 int error;
2124 int zflg = ZEXISTS;
2125 boolean_t waited = B_FALSE;
2126
2127 if (name == NULL)
2128 return (SET_ERROR(EINVAL));
2129
2130 ZFS_ENTER(zfsvfs);
2131 ZFS_VERIFY_ZP(dzp);
2132 zilog = zfsvfs->z_log;
2133
2134 if (flags & FIGNORECASE)
2135 zflg |= ZCILOOK;
2136 top:
2137 zp = NULL;
2138
2139 /*
2140 * Attempt to lock directory; fail if entry doesn't exist.
2141 */
2142 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2143 NULL, NULL))) {
2144 ZFS_EXIT(zfsvfs);
2145 return (error);
2146 }
2147
2148 ip = ZTOI(zp);
2149
2150 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
2151 goto out;
2152 }
2153
2154 if (!S_ISDIR(ip->i_mode)) {
2155 error = SET_ERROR(ENOTDIR);
2156 goto out;
2157 }
2158
2159 if (ip == cwd) {
2160 error = SET_ERROR(EINVAL);
2161 goto out;
2162 }
2163
2164 /*
2165 * Grab a lock on the directory to make sure that no one is
2166 * trying to add (or look up) entries while we are removing it.
2167 */
2168 rw_enter(&zp->z_name_lock, RW_WRITER);
2169
2170 /*
2171 * Grab a lock on the parent pointer to make sure we play well
2172 * with the treewalk and directory rename code.
2173 */
2174 rw_enter(&zp->z_parent_lock, RW_WRITER);
2175
2176 tx = dmu_tx_create(zfsvfs->z_os);
2177 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2178 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2179 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2180 zfs_sa_upgrade_txholds(tx, zp);
2181 zfs_sa_upgrade_txholds(tx, dzp);
2182 dmu_tx_mark_netfree(tx);
2183 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2184 if (error) {
2185 rw_exit(&zp->z_parent_lock);
2186 rw_exit(&zp->z_name_lock);
2187 zfs_dirent_unlock(dl);
2188 if (error == ERESTART) {
2189 waited = B_TRUE;
2190 dmu_tx_wait(tx);
2191 dmu_tx_abort(tx);
2192 iput(ip);
2193 goto top;
2194 }
2195 dmu_tx_abort(tx);
2196 iput(ip);
2197 ZFS_EXIT(zfsvfs);
2198 return (error);
2199 }
2200
2201 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2202
2203 if (error == 0) {
2204 uint64_t txtype = TX_RMDIR;
2205 if (flags & FIGNORECASE)
2206 txtype |= TX_CI;
2207 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2208 }
2209
2210 dmu_tx_commit(tx);
2211
2212 rw_exit(&zp->z_parent_lock);
2213 rw_exit(&zp->z_name_lock);
2214 out:
2215 zfs_dirent_unlock(dl);
2216
2217 zfs_inode_update(dzp);
2218 zfs_inode_update(zp);
2219 iput(ip);
2220
2221 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2222 zil_commit(zilog, 0);
2223
2224 ZFS_EXIT(zfsvfs);
2225 return (error);
2226 }
2227
2228 /*
2229 * Read as many directory entries as will fit into the provided
2230 * dirent buffer from the given directory cursor position.
2231 *
2232 * IN: ip - inode of directory to read.
2233 * dirent - buffer for directory entries.
2234 *
2235 * OUT: dirent - buffer filled with directory entries.
2236 *
2237 * RETURN: 0 if success
2238 * error code if failure
2239 *
2240 * Timestamps:
2241 * ip - atime updated
2242 *
2243 * Note that the low 4 bits of the cookie returned by zap are always zero.
2244 * This allows us to use the low range for "special" directory entries:
2245 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2246 * we use the offset 2 for the '.zfs' directory.
2247 */
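/*
 * Illustrative summary (derived from the special cases handled below)
 * of how cursor positions map to entries:
 *
 *	ctx->pos == 0	->  "."    (this directory's own object id)
 *	ctx->pos == 1	->  ".."   (the parent's object id)
 *	ctx->pos == 2	->  ".zfs" (ZFSCTL_INO_ROOT; root directory only)
 *	ctx->pos >= 3	->  regular entries via the serialized ZAP cursor
 */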
2248 /* ARGSUSED */
2249 int
2250 zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
2251 {
2252 znode_t *zp = ITOZ(ip);
2253 zfsvfs_t *zfsvfs = ITOZSB(ip);
2254 objset_t *os;
2255 zap_cursor_t zc;
2256 zap_attribute_t zap;
2257 int error;
2258 uint8_t prefetch;
2259 uint8_t type;
2260 int done = 0;
2261 uint64_t parent;
2262 uint64_t offset; /* must be unsigned; checks for < 1 */
2263
2264 ZFS_ENTER(zfsvfs);
2265 ZFS_VERIFY_ZP(zp);
2266
2267 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2268 &parent, sizeof (parent))) != 0)
2269 goto out;
2270
2271 /*
2272 * Quit if the directory has been removed (POSIX)
2273 */
2274 if (zp->z_unlinked)
2275 goto out;
2276
2277 error = 0;
2278 os = zfsvfs->z_os;
2279 offset = ctx->pos;
2280 prefetch = zp->z_zn_prefetch;
2281
2282 /*
2283 * Initialize the iterator cursor.
2284 */
2285 if (offset <= 3) {
2286 /*
2287 * Start iteration from the beginning of the directory.
2288 */
2289 zap_cursor_init(&zc, os, zp->z_id);
2290 } else {
2291 /*
2292 * The offset is a serialized cursor.
2293 */
2294 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2295 }
2296
2297 /*
2298 * Transform to file-system independent format
2299 */
2300 while (!done) {
2301 uint64_t objnum;
2302 /*
2303 * Special case `.', `..', and `.zfs'.
2304 */
2305 if (offset == 0) {
2306 (void) strcpy(zap.za_name, ".");
2307 zap.za_normalization_conflict = 0;
2308 objnum = zp->z_id;
2309 type = DT_DIR;
2310 } else if (offset == 1) {
2311 (void) strcpy(zap.za_name, "..");
2312 zap.za_normalization_conflict = 0;
2313 objnum = parent;
2314 type = DT_DIR;
2315 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2316 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2317 zap.za_normalization_conflict = 0;
2318 objnum = ZFSCTL_INO_ROOT;
2319 type = DT_DIR;
2320 } else {
2321 /*
2322 * Grab next entry.
2323 */
2324 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2325 if (error == ENOENT)
2326 break;
2327 else
2328 goto update;
2329 }
2330
2331 /*
2332 * Allow multiple entries provided the first entry is
2333 * the object id. Non-zpl consumers may safely make
2334 * use of the additional space.
2335 *
2336 * XXX: This should be a feature flag for compatibility
2337 */
2338 if (zap.za_integer_length != 8 ||
2339 zap.za_num_integers == 0) {
2340 cmn_err(CE_WARN, "zap_readdir: bad directory "
2341 "entry, obj = %lld, offset = %lld, "
2342 "length = %d, num = %lld\n",
2343 (u_longlong_t)zp->z_id,
2344 (u_longlong_t)offset,
2345 zap.za_integer_length,
2346 (u_longlong_t)zap.za_num_integers);
2347 error = SET_ERROR(ENXIO);
2348 goto update;
2349 }
2350
2351 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2352 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2353 }
2354
2355 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
2356 objnum, type);
2357 if (done)
2358 break;
2359
2360 /* Prefetch znode */
2361 if (prefetch) {
2362 dmu_prefetch(os, objnum, 0, 0, 0,
2363 ZIO_PRIORITY_SYNC_READ);
2364 }
2365
2366 /*
2367 * Move to the next entry, fill in the previous offset.
2368 */
2369 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2370 zap_cursor_advance(&zc);
2371 offset = zap_cursor_serialize(&zc);
2372 } else {
2373 offset += 1;
2374 }
2375 ctx->pos = offset;
2376 }
2377 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2378
2379 update:
2380 zap_cursor_fini(&zc);
2381 if (error == ENOENT)
2382 error = 0;
2383 out:
2384 ZFS_EXIT(zfsvfs);
2385
2386 return (error);
2387 }
2388
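/*
 * While an fsync is in flight the count below is published through the
 * zfs_fsyncer_key TSD, set on entry to zfs_fsync() and cleared before
 * returning. The consumer is assumed to be the ZIL write-logging path,
 * which can use it as a hint that a commit is imminent when choosing a
 * log-record strategy.
 */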
2389 ulong_t zfs_fsync_sync_cnt = 4;
2390
2391 int
2392 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2393 {
2394 znode_t *zp = ITOZ(ip);
2395 zfsvfs_t *zfsvfs = ITOZSB(ip);
2396
2397 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2398
2399 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2400 ZFS_ENTER(zfsvfs);
2401 ZFS_VERIFY_ZP(zp);
2402 zil_commit(zfsvfs->z_log, zp->z_id);
2403 ZFS_EXIT(zfsvfs);
2404 }
2405 tsd_set(zfs_fsyncer_key, NULL);
2406
2407 return (0);
2408 }
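/*
 * Illustrative caller sketch (an assumption about the ZPL layer, not
 * part of this file): the Linux-facing wrapper in zpl_file.c calls
 * zfs_fsync() with the caller's credentials held, roughly as follows;
 * the exact prototype tracks the kernel's fsync interface.
 *
 *	static int
 *	zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 *	{
 *		cred_t *cr = CRED();
 *		int error;
 *
 *		crhold(cr);
 *		error = zfs_fsync(file_inode(filp), datasync, cr);
 *		crfree(cr);
 *
 *		return (error);
 *	}
 */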
2409
2410
2411 /*
2412 * Get the requested file attributes and place them in the provided
2413 * vattr structure.
2414 *
2415 * IN: ip - inode of file.
2416 * vap - va_mask identifies requested attributes.
2417 * If ATTR_XVATTR set, then optional attrs are requested
2418 * flags - ATTR_NOACLCHECK (CIFS server context)
2419 * cr - credentials of caller.
2420 *
2421 * OUT: vap - attribute values.
2422 *
2423 * RETURN: 0 (always succeeds)
2424 */
2425 /* ARGSUSED */
2426 int
2427 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2428 {
2429 znode_t *zp = ITOZ(ip);
2430 zfsvfs_t *zfsvfs = ITOZSB(ip);
2431 int error = 0;
2432 uint64_t links;
2433 uint64_t atime[2], mtime[2], ctime[2];
2434 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2435 xoptattr_t *xoap = NULL;
2436 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2437 sa_bulk_attr_t bulk[3];
2438 int count = 0;
2439
2440 ZFS_ENTER(zfsvfs);
2441 ZFS_VERIFY_ZP(zp);
2442
2443 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2444
2445 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2446 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2447 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2448
2449 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2450 ZFS_EXIT(zfsvfs);
2451 return (error);
2452 }
2453
2454 /*
2455 * If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES.
2456 * Also, if we are the owner, don't bother, since the owner should
2457 * always be allowed to read the basic attributes of the file.
2458 */
2459 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2460 (vap->va_uid != crgetuid(cr))) {
2461 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2462 skipaclchk, cr))) {
2463 ZFS_EXIT(zfsvfs);
2464 return (error);
2465 }
2466 }
2467
2468 /*
2469 * Return all attributes. It's cheaper to provide the answer
2470 * than to determine whether we were asked the question.
2471 */
2472
2473 mutex_enter(&zp->z_lock);
2474 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2475 vap->va_mode = zp->z_mode;
2476 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2477 vap->va_nodeid = zp->z_id;
2478 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2479 links = ZTOI(zp)->i_nlink + 1;
2480 else
2481 links = ZTOI(zp)->i_nlink;
2482 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2483 vap->va_size = i_size_read(ip);
2484 vap->va_rdev = ip->i_rdev;
2485 vap->va_seq = ip->i_generation;
2486
2487 /*
2488 * Add in any requested optional attributes and the create time.
2489 * Also set the corresponding bits in the returned attribute bitmap.
2490 */
2491 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2492 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2493 xoap->xoa_archive =
2494 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2495 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2496 }
2497
2498 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2499 xoap->xoa_readonly =
2500 ((zp->z_pflags & ZFS_READONLY) != 0);
2501 XVA_SET_RTN(xvap, XAT_READONLY);
2502 }
2503
2504 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2505 xoap->xoa_system =
2506 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2507 XVA_SET_RTN(xvap, XAT_SYSTEM);
2508 }
2509
2510 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2511 xoap->xoa_hidden =
2512 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2513 XVA_SET_RTN(xvap, XAT_HIDDEN);
2514 }
2515
2516 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2517 xoap->xoa_nounlink =
2518 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2519 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2520 }
2521
2522 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2523 xoap->xoa_immutable =
2524 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2525 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2526 }
2527
2528 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2529 xoap->xoa_appendonly =
2530 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2531 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2532 }
2533
2534 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2535 xoap->xoa_nodump =
2536 ((zp->z_pflags & ZFS_NODUMP) != 0);
2537 XVA_SET_RTN(xvap, XAT_NODUMP);
2538 }
2539
2540 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2541 xoap->xoa_opaque =
2542 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2543 XVA_SET_RTN(xvap, XAT_OPAQUE);
2544 }
2545
2546 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2547 xoap->xoa_av_quarantined =
2548 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2549 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2550 }
2551
2552 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2553 xoap->xoa_av_modified =
2554 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2555 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2556 }
2557
2558 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2559 S_ISREG(ip->i_mode)) {
2560 zfs_sa_get_scanstamp(zp, xvap);
2561 }
2562
2563 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2564 uint64_t times[2];
2565
2566 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2567 times, sizeof (times));
2568 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2569 XVA_SET_RTN(xvap, XAT_CREATETIME);
2570 }
2571
2572 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2573 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2574 XVA_SET_RTN(xvap, XAT_REPARSE);
2575 }
2576 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2577 xoap->xoa_generation = ip->i_generation;
2578 XVA_SET_RTN(xvap, XAT_GEN);
2579 }
2580
2581 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2582 xoap->xoa_offline =
2583 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2584 XVA_SET_RTN(xvap, XAT_OFFLINE);
2585 }
2586
2587 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2588 xoap->xoa_sparse =
2589 ((zp->z_pflags & ZFS_SPARSE) != 0);
2590 XVA_SET_RTN(xvap, XAT_SPARSE);
2591 }
2592 }
2593
2594 ZFS_TIME_DECODE(&vap->va_atime, atime);
2595 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2596 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2597
2598 mutex_exit(&zp->z_lock);
2599
2600 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2601
2602 if (zp->z_blksz == 0) {
2603 /*
2604 * Block size hasn't been set; suggest maximal I/O transfers.
2605 */
2606 vap->va_blksize = zfsvfs->z_max_blksz;
2607 }
2608
2609 ZFS_EXIT(zfsvfs);
2610 return (0);
2611 }
2612
2613 /*
2614 * Get the basic file attributes and place them in the provided kstat
2615 * structure. The inode is assumed to be the authoritative source
2616 * for most of the attributes. However, the znode currently has the
2617 * authoritative atime, blksize, and block count.
2618 *
2619 * IN: ip - inode of file.
2620 *
2621 * OUT: sp - kstat values.
2622 *
2623 * RETURN: 0 (always succeeds)
2624 */
2625 /* ARGSUSED */
2626 int
2627 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2628 {
2629 znode_t *zp = ITOZ(ip);
2630 zfsvfs_t *zfsvfs = ITOZSB(ip);
2631 uint32_t blksize;
2632 u_longlong_t nblocks;
2633
2634 ZFS_ENTER(zfsvfs);
2635 ZFS_VERIFY_ZP(zp);
2636
2637 mutex_enter(&zp->z_lock);
2638
2639 generic_fillattr(ip, sp);
2640
2641 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2642 sp->blksize = blksize;
2643 sp->blocks = nblocks;
2644
2645 if (unlikely(zp->z_blksz == 0)) {
2646 /*
2647 * Block size hasn't been set; suggest maximal I/O transfers.
2648 */
2649 sp->blksize = zfsvfs->z_max_blksz;
2650 }
2651
2652 mutex_exit(&zp->z_lock);
2653
2654 /*
2655 * Required to prevent NFS client from detecting different inode
2656 * numbers of snapshot root dentry before and after snapshot mount.
2657 */
2658 if (zfsvfs->z_issnap) {
2659 if (ip->i_sb->s_root->d_inode == ip)
2660 sp->ino = ZFSCTL_INO_SNAPDIRS -
2661 dmu_objset_id(zfsvfs->z_os);
2662 }
2663
2664 ZFS_EXIT(zfsvfs);
2665
2666 return (0);
2667 }
2668
2669 /*
2670 * Set the file attributes to the values contained in the
2671 * vattr structure.
2672 *
2673 * IN: ip - inode of file to be modified.
2674 * vap - new attribute values.
2675 * If ATTR_XVATTR set, then optional attrs are being set
2676 * flags - ATTR_UTIME set if non-default time values provided.
2677 * - ATTR_NOACLCHECK (CIFS context only).
2678 * cr - credentials of caller.
2679 *
2680 * RETURN: 0 if success
2681 * error code if failure
2682 *
2683 * Timestamps:
2684 * ip - ctime updated, mtime updated if size changed.
2685 */
2686 /* ARGSUSED */
2687 int
2688 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2689 {
2690 znode_t *zp = ITOZ(ip);
2691 zfsvfs_t *zfsvfs = ITOZSB(ip);
2692 zilog_t *zilog;
2693 dmu_tx_t *tx;
2694 vattr_t oldva;
2695 xvattr_t *tmpxvattr;
2696 uint_t mask = vap->va_mask;
2697 uint_t saved_mask = 0;
2698 int trim_mask = 0;
2699 uint64_t new_mode;
2700 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
2701 uint64_t xattr_obj;
2702 uint64_t mtime[2], ctime[2], atime[2];
2703 znode_t *attrzp;
2704 int need_policy = FALSE;
2705 int err, err2;
2706 zfs_fuid_info_t *fuidp = NULL;
2707 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2708 xoptattr_t *xoap;
2709 zfs_acl_t *aclp;
2710 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2711 boolean_t fuid_dirtied = B_FALSE;
2712 sa_bulk_attr_t *bulk, *xattr_bulk;
2713 int count = 0, xattr_count = 0;
2714
2715 if (mask == 0)
2716 return (0);
2717
2718 ZFS_ENTER(zfsvfs);
2719 ZFS_VERIFY_ZP(zp);
2720
2721 zilog = zfsvfs->z_log;
2722
2723 /*
2724 * Make sure that if an ephemeral uid/gid or xvattr is specified,
2725 * the file system is at the proper version level.
2726 */
2727
2728 if (zfsvfs->z_use_fuids == B_FALSE &&
2729 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2730 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2731 (mask & ATTR_XVATTR))) {
2732 ZFS_EXIT(zfsvfs);
2733 return (SET_ERROR(EINVAL));
2734 }
2735
2736 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2737 ZFS_EXIT(zfsvfs);
2738 return (SET_ERROR(EISDIR));
2739 }
2740
2741 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2742 ZFS_EXIT(zfsvfs);
2743 return (SET_ERROR(EINVAL));
2744 }
2745
2746 /*
2747 * If this is an xvattr_t, then get a pointer to the structure of
2748 * optional attributes. If this is NULL, then we have a vattr_t.
2749 */
2750 xoap = xva_getxoptattr(xvap);
2751
2752 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2753 xva_init(tmpxvattr);
2754
2755 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2756 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2757
2758 /*
2759 * Only the immutable bit and atime may be altered on immutable files.
2760 */
2761 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2762 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2763 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2764 err = EPERM;
2765 goto out3;
2766 }
2767
2768 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2769 err = EPERM;
2770 goto out3;
2771 }
2772
2773 /*
2774 * Verify that the timestamps don't overflow 32 bits.
2775 * ZFS can handle large timestamps, but 32-bit syscalls can't
2776 * handle times beyond 2038 (the signed 32-bit time_t limit). This
2777 * check should be removed once large timestamps are fully supported.
2778 */
2779 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2780 if (((mask & ATTR_ATIME) &&
2781 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2782 ((mask & ATTR_MTIME) &&
2783 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2784 err = EOVERFLOW;
2785 goto out3;
2786 }
2787 }
2788
2789 top:
2790 attrzp = NULL;
2791 aclp = NULL;
2792
2793 /* Can this be moved to before the top label? */
2794 if (zfs_is_readonly(zfsvfs)) {
2795 err = EROFS;
2796 goto out3;
2797 }
2798
2799 /*
2800 * First validate permissions
2801 */
2802
2803 if (mask & ATTR_SIZE) {
2804 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2805 if (err)
2806 goto out3;
2807
2808 /*
2809 * XXX - Note, we are not providing any open
2810 * mode flags here (like FNDELAY), so we may
2811 * block if there are locks present... this
2812 * should be addressed in openat().
2813 */
2814 /* XXX - would it be OK to generate a log record here? */
2815 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2816 if (err)
2817 goto out3;
2818 }
2819
2820 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2821 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2822 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2823 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2824 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2825 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2826 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2827 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2828 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2829 skipaclchk, cr);
2830 }
2831
2832 if (mask & (ATTR_UID|ATTR_GID)) {
2833 int idmask = (mask & (ATTR_UID|ATTR_GID));
2834 int take_owner;
2835 int take_group;
2836
2837 /*
2838 * NOTE: even if a new mode is being set,
2839 * we may clear S_ISUID/S_ISGID bits.
2840 */
2841
2842 if (!(mask & ATTR_MODE))
2843 vap->va_mode = zp->z_mode;
2844
2845 /*
2846 * Take ownership or chgrp to group we are a member of
2847 */
2848
2849 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
2850 take_group = (mask & ATTR_GID) &&
2851 zfs_groupmember(zfsvfs, vap->va_gid, cr);
2852
2853 /*
2854 * If both ATTR_UID and ATTR_GID are set then take_owner and
2855 * take_group must both be set in order to allow taking
2856 * ownership.
2857 *
2858 * Otherwise, send the check through secpolicy_vnode_setattr()
2859 *
2860 */
2861
2862 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2863 take_owner && take_group) ||
2864 ((idmask == ATTR_UID) && take_owner) ||
2865 ((idmask == ATTR_GID) && take_group)) {
2866 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2867 skipaclchk, cr) == 0) {
2868 /*
2869 * Remove setuid/setgid for non-privileged users
2870 */
2871 (void) secpolicy_setid_clear(vap, cr);
2872 trim_mask = (mask & (ATTR_UID|ATTR_GID));
2873 } else {
2874 need_policy = TRUE;
2875 }
2876 } else {
2877 need_policy = TRUE;
2878 }
2879 }
2880
2881 mutex_enter(&zp->z_lock);
2882 oldva.va_mode = zp->z_mode;
2883 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2884 if (mask & ATTR_XVATTR) {
2885 /*
2886 * Update xvattr mask to include only those attributes
2887 * that are actually changing.
2888 *
2889 * The bits will be restored prior to actually setting
2890 * the attributes so the caller thinks they were set.
2891 */
2892 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2893 if (xoap->xoa_appendonly !=
2894 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2895 need_policy = TRUE;
2896 } else {
2897 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2898 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2899 }
2900 }
2901
2902 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2903 if (xoap->xoa_nounlink !=
2904 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2905 need_policy = TRUE;
2906 } else {
2907 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2908 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2909 }
2910 }
2911
2912 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2913 if (xoap->xoa_immutable !=
2914 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2915 need_policy = TRUE;
2916 } else {
2917 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2918 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2919 }
2920 }
2921
2922 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2923 if (xoap->xoa_nodump !=
2924 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2925 need_policy = TRUE;
2926 } else {
2927 XVA_CLR_REQ(xvap, XAT_NODUMP);
2928 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2929 }
2930 }
2931
2932 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2933 if (xoap->xoa_av_modified !=
2934 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2935 need_policy = TRUE;
2936 } else {
2937 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2938 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2939 }
2940 }
2941
2942 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2943 if ((!S_ISREG(ip->i_mode) &&
2944 xoap->xoa_av_quarantined) ||
2945 xoap->xoa_av_quarantined !=
2946 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2947 need_policy = TRUE;
2948 } else {
2949 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2950 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2951 }
2952 }
2953
2954 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2955 mutex_exit(&zp->z_lock);
2956 err = EPERM;
2957 goto out3;
2958 }
2959
2960 if (need_policy == FALSE &&
2961 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2962 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2963 need_policy = TRUE;
2964 }
2965 }
2966
2967 mutex_exit(&zp->z_lock);
2968
2969 if (mask & ATTR_MODE) {
2970 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2971 err = secpolicy_setid_setsticky_clear(ip, vap,
2972 &oldva, cr);
2973 if (err)
2974 goto out3;
2975
2976 trim_mask |= ATTR_MODE;
2977 } else {
2978 need_policy = TRUE;
2979 }
2980 }
2981
2982 if (need_policy) {
2983 /*
2984 * If trim_mask is set then take-ownership
2985 * has been granted or write_acl is present and the user
2986 * has the ability to modify the mode. In that case remove
2987 * UID|GID and/or MODE from the mask so that
2988 * secpolicy_vnode_setattr() doesn't revoke it.
2989 */
2990
2991 if (trim_mask) {
2992 saved_mask = vap->va_mask;
2993 vap->va_mask &= ~trim_mask;
2994 }
2995 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2996 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2997 if (err)
2998 goto out3;
2999
3000 if (trim_mask)
3001 vap->va_mask |= saved_mask;
3002 }
3003
3004 /*
3005 * secpolicy_vnode_setattr() or taking ownership may have
3006 * changed va_mask.
3007 */
3008 mask = vap->va_mask;
3009
3010 if ((mask & (ATTR_UID | ATTR_GID))) {
3011 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3012 &xattr_obj, sizeof (xattr_obj));
3013
3014 if (err == 0 && xattr_obj) {
3015 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
3016 if (err)
3017 goto out2;
3018 }
3019 if (mask & ATTR_UID) {
3020 new_kuid = zfs_fuid_create(zfsvfs,
3021 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3022 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
3023 zfs_fuid_overquota(zfsvfs, B_FALSE, new_kuid)) {
3024 if (attrzp)
3025 iput(ZTOI(attrzp));
3026 err = EDQUOT;
3027 goto out2;
3028 }
3029 }
3030
3031 if (mask & ATTR_GID) {
3032 new_kgid = zfs_fuid_create(zfsvfs,
3033 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
3034 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
3035 zfs_fuid_overquota(zfsvfs, B_TRUE, new_kgid)) {
3036 if (attrzp)
3037 iput(ZTOI(attrzp));
3038 err = EDQUOT;
3039 goto out2;
3040 }
3041 }
3042 }
3043 tx = dmu_tx_create(zfsvfs->z_os);
3044
3045 if (mask & ATTR_MODE) {
3046 uint64_t pmode = zp->z_mode;
3047 uint64_t acl_obj;
3048 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3049
3050 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
3051
3052 mutex_enter(&zp->z_lock);
3053 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3054 /*
3055 * Are we upgrading ACL from old V0 format
3056 * to V1 format?
3057 */
3058 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3059 zfs_znode_acl_version(zp) ==
3060 ZFS_ACL_VERSION_INITIAL) {
3061 dmu_tx_hold_free(tx, acl_obj, 0,
3062 DMU_OBJECT_END);
3063 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3064 0, aclp->z_acl_bytes);
3065 } else {
3066 dmu_tx_hold_write(tx, acl_obj, 0,
3067 aclp->z_acl_bytes);
3068 }
3069 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3070 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3071 0, aclp->z_acl_bytes);
3072 }
3073 mutex_exit(&zp->z_lock);
3074 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3075 } else {
3076 if ((mask & ATTR_XVATTR) &&
3077 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3078 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3079 else
3080 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3081 }
3082
3083 if (attrzp) {
3084 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3085 }
3086
3087 fuid_dirtied = zfsvfs->z_fuid_dirty;
3088 if (fuid_dirtied)
3089 zfs_fuid_txhold(zfsvfs, tx);
3090
3091 zfs_sa_upgrade_txholds(tx, zp);
3092
3093 err = dmu_tx_assign(tx, TXG_WAIT);
3094 if (err)
3095 goto out;
3096
3097 count = 0;
3098 /*
3099 * Set each attribute requested.
3100 * We group settings according to the locks they need to acquire.
3101 *
3102 * Note: you cannot set ctime directly, although it will be
3103 * updated as a side-effect of calling this function.
3104 */
3105
3106
3107 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3108 mutex_enter(&zp->z_acl_lock);
3109 mutex_enter(&zp->z_lock);
3110
3111 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3112 &zp->z_pflags, sizeof (zp->z_pflags));
3113
3114 if (attrzp) {
3115 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3116 mutex_enter(&attrzp->z_acl_lock);
3117 mutex_enter(&attrzp->z_lock);
3118 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3119 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3120 sizeof (attrzp->z_pflags));
3121 }
3122
3123 if (mask & (ATTR_UID|ATTR_GID)) {
3124
3125 if (mask & ATTR_UID) {
3126 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3127 new_uid = zfs_uid_read(ZTOI(zp));
3128 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3129 &new_uid, sizeof (new_uid));
3130 if (attrzp) {
3131 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3132 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3133 sizeof (new_uid));
3134 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
3135 }
3136 }
3137
3138 if (mask & ATTR_GID) {
3139 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3140 new_gid = zfs_gid_read(ZTOI(zp));
3141 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3142 NULL, &new_gid, sizeof (new_gid));
3143 if (attrzp) {
3144 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3145 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3146 sizeof (new_gid));
3147 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
3148 }
3149 }
3150 if (!(mask & ATTR_MODE)) {
3151 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3152 NULL, &new_mode, sizeof (new_mode));
3153 new_mode = zp->z_mode;
3154 }
3155 err = zfs_acl_chown_setattr(zp);
3156 ASSERT(err == 0);
3157 if (attrzp) {
3158 err = zfs_acl_chown_setattr(attrzp);
3159 ASSERT(err == 0);
3160 }
3161 }
3162
3163 if (mask & ATTR_MODE) {
3164 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3165 &new_mode, sizeof (new_mode));
3166 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
3167 ASSERT3P(aclp, !=, NULL);
3168 err = zfs_aclset_common(zp, aclp, cr, tx);
3169 ASSERT0(err);
3170 if (zp->z_acl_cached)
3171 zfs_acl_free(zp->z_acl_cached);
3172 zp->z_acl_cached = aclp;
3173 aclp = NULL;
3174 }
3175
3176 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3177 zp->z_atime_dirty = 0;
3178 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3179 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3180 &atime, sizeof (atime));
3181 }
3182
3183 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
3184 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3185 ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
3186 ZTOI(zp)->i_sb->s_time_gran);
3187
3188 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3189 mtime, sizeof (mtime));
3190 }
3191
3192 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
3193 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
3194 ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
3195 ZTOI(zp)->i_sb->s_time_gran);
3196 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3197 ctime, sizeof (ctime));
3198 }
3199
3200 if (attrzp && mask) {
3201 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3202 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
3203 sizeof (ctime));
3204 }
3205
3206 /*
3207 * Do this after setting timestamps to prevent timestamp
3208 * update from toggling bit
3209 */
3210
3211 if (xoap && (mask & ATTR_XVATTR)) {
3212
3213 /*
3214 * restore trimmed off masks
3215 * so that return masks can be set for caller.
3216 */
3217
3218 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
3219 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3220 }
3221 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
3222 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3223 }
3224 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
3225 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3226 }
3227 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
3228 XVA_SET_REQ(xvap, XAT_NODUMP);
3229 }
3230 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
3231 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3232 }
3233 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
3234 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3235 }
3236
3237 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3238 ASSERT(S_ISREG(ip->i_mode));
3239
3240 zfs_xvattr_set(zp, xvap, tx);
3241 }
3242
3243 if (fuid_dirtied)
3244 zfs_fuid_sync(zfsvfs, tx);
3245
3246 if (mask != 0)
3247 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3248
3249 mutex_exit(&zp->z_lock);
3250 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3251 mutex_exit(&zp->z_acl_lock);
3252
3253 if (attrzp) {
3254 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3255 mutex_exit(&attrzp->z_acl_lock);
3256 mutex_exit(&attrzp->z_lock);
3257 }
3258 out:
3259 if (err == 0 && attrzp) {
3260 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3261 xattr_count, tx);
3262 ASSERT(err2 == 0);
3263 }
3264
3265 if (aclp)
3266 zfs_acl_free(aclp);
3267
3268 if (fuidp) {
3269 zfs_fuid_info_free(fuidp);
3270 fuidp = NULL;
3271 }
3272
3273 if (err) {
3274 dmu_tx_abort(tx);
3275 if (attrzp)
3276 iput(ZTOI(attrzp));
3277 if (err == ERESTART)
3278 goto top;
3279 } else {
3280 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3281 dmu_tx_commit(tx);
3282 if (attrzp)
3283 iput(ZTOI(attrzp));
3284 zfs_inode_update(zp);
3285 }
3286
3287 out2:
3288 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3289 zil_commit(zilog, 0);
3290
3291 out3:
3292 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
3293 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
3294 kmem_free(tmpxvattr, sizeof (xvattr_t));
3295 ZFS_EXIT(zfsvfs);
3296 return (err);
3297 }
3298
3299 typedef struct zfs_zlock {
3300 krwlock_t *zl_rwlock; /* lock we acquired */
3301 znode_t *zl_znode; /* znode we held */
3302 struct zfs_zlock *zl_next; /* next in list */
3303 } zfs_zlock_t;
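/*
 * A rename that moves a directory builds a chain of these records, one
 * per ".." traversal from the target directory back toward the source
 * (or the root), acquired by zfs_rename_lock() and released in LIFO
 * order by zfs_rename_unlock().
 */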
3304
3305 /*
3306 * Drop locks and release vnodes that were held by zfs_rename_lock().
3307 */
3308 static void
3309 zfs_rename_unlock(zfs_zlock_t **zlpp)
3310 {
3311 zfs_zlock_t *zl;
3312
3313 while ((zl = *zlpp) != NULL) {
3314 if (zl->zl_znode != NULL)
3315 zfs_iput_async(ZTOI(zl->zl_znode));
3316 rw_exit(zl->zl_rwlock);
3317 *zlpp = zl->zl_next;
3318 kmem_free(zl, sizeof (*zl));
3319 }
3320 }
3321
3322 /*
3323 * Search back through the directory tree, using the ".." entries.
3324 * Lock each directory in the chain to prevent concurrent renames.
3325 * Fail any attempt to move a directory into one of its own descendants.
3326 * XXX - z_parent_lock can overlap with map or grow locks
3327 */
3328 static int
3329 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3330 {
3331 zfs_zlock_t *zl;
3332 znode_t *zp = tdzp;
3333 uint64_t rootid = ZTOZSB(zp)->z_root;
3334 uint64_t oidp = zp->z_id;
3335 krwlock_t *rwlp = &szp->z_parent_lock;
3336 krw_t rw = RW_WRITER;
3337
3338 /*
3339 * First pass write-locks szp and compares to zp->z_id.
3340 * Later passes read-lock zp and compare to zp->z_parent.
3341 */
3342 do {
3343 if (!rw_tryenter(rwlp, rw)) {
3344 /*
3345 * Another thread is renaming in this path.
3346 * Note that if we are a WRITER, we don't have any
3347 * parent_locks held yet.
3348 */
3349 if (rw == RW_READER && zp->z_id > szp->z_id) {
3350 /*
3351 * Drop our locks and restart
3352 */
3353 zfs_rename_unlock(&zl);
3354 *zlpp = NULL;
3355 zp = tdzp;
3356 oidp = zp->z_id;
3357 rwlp = &szp->z_parent_lock;
3358 rw = RW_WRITER;
3359 continue;
3360 } else {
3361 /*
3362 * Wait for other thread to drop its locks
3363 */
3364 rw_enter(rwlp, rw);
3365 }
3366 }
3367
3368 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3369 zl->zl_rwlock = rwlp;
3370 zl->zl_znode = NULL;
3371 zl->zl_next = *zlpp;
3372 *zlpp = zl;
3373
3374 if (oidp == szp->z_id) /* We're a descendant of szp */
3375 return (SET_ERROR(EINVAL));
3376
3377 if (oidp == rootid) /* We've hit the top */
3378 return (0);
3379
3380 if (rw == RW_READER) { /* i.e. not the first pass */
3381 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3382 if (error)
3383 return (error);
3384 zl->zl_znode = zp;
3385 }
3386 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3387 &oidp, sizeof (oidp));
3388 rwlp = &zp->z_parent_lock;
3389 rw = RW_READER;
3390
3391 } while (zp->z_id != sdzp->z_id);
3392
3393 return (0);
3394 }
3395
3396 /*
3397 * Move an entry from the provided source directory to the target
3398 * directory. Change the entry name as indicated.
3399 *
3400 * IN: sdip - Source directory containing the "old entry".
3401 * snm - Old entry name.
3402 * tdip - Target directory to contain the "new entry".
3403 * tnm - New entry name.
3404 * cr - credentials of caller.
3405 * flags - case flags
3406 *
3407 * RETURN: 0 on success, error code on failure.
3408 *
3409 * Timestamps:
3410 * sdip,tdip - ctime|mtime updated
3411 */
3412 /*ARGSUSED*/
3413 int
3414 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3415 cred_t *cr, int flags)
3416 {
3417 znode_t *tdzp, *szp, *tzp;
3418 znode_t *sdzp = ITOZ(sdip);
3419 zfsvfs_t *zfsvfs = ITOZSB(sdip);
3420 zilog_t *zilog;
3421 zfs_dirlock_t *sdl, *tdl;
3422 dmu_tx_t *tx;
3423 zfs_zlock_t *zl;
3424 int cmp, serr, terr;
3425 int error = 0;
3426 int zflg = 0;
3427 boolean_t waited = B_FALSE;
3428
3429 if (snm == NULL || tnm == NULL)
3430 return (SET_ERROR(EINVAL));
3431
3432 ZFS_ENTER(zfsvfs);
3433 ZFS_VERIFY_ZP(sdzp);
3434 zilog = zfsvfs->z_log;
3435
3436 tdzp = ITOZ(tdip);
3437 ZFS_VERIFY_ZP(tdzp);
3438
3439 /*
3440 * We check i_sb because snapshots and the ctldir must have different
3441 * super blocks.
3442 */
3443 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3444 ZFS_EXIT(zfsvfs);
3445 return (SET_ERROR(EXDEV));
3446 }
3447
3448 if (zfsvfs->z_utf8 && u8_validate(tnm,
3449 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3450 ZFS_EXIT(zfsvfs);
3451 return (SET_ERROR(EILSEQ));
3452 }
3453
3454 if (flags & FIGNORECASE)
3455 zflg |= ZCILOOK;
3456
3457 top:
3458 szp = NULL;
3459 tzp = NULL;
3460 zl = NULL;
3461
3462 /*
3463 * This is to prevent the creation of links into attribute space
3464 * by renaming a linked file into/out of an attribute directory.
3465 * See the comment in zfs_link() for why this is considered bad.
3466 */
3467 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3468 ZFS_EXIT(zfsvfs);
3469 return (SET_ERROR(EINVAL));
3470 }
3471
3472 /*
3473 * Lock source and target directory entries. To prevent deadlock,
3474 * a lock ordering must be defined. We lock the directory with
3475 * the smallest object id first, or if it's a tie, the one with
3476 * the lexically first name.
3477 */
3478 if (sdzp->z_id < tdzp->z_id) {
3479 cmp = -1;
3480 } else if (sdzp->z_id > tdzp->z_id) {
3481 cmp = 1;
3482 } else {
3483 /*
3484 * First compare the two name arguments without
3485 * considering any case folding.
3486 */
3487 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3488
3489 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3490 ASSERT(error == 0 || !zfsvfs->z_utf8);
3491 if (cmp == 0) {
3492 /*
3493 * POSIX: "If the old argument and the new argument
3494 * both refer to links to the same existing file,
3495 * the rename() function shall return successfully
3496 * and perform no other action."
3497 */
3498 ZFS_EXIT(zfsvfs);
3499 return (0);
3500 }
3501 /*
3502 * If the file system is case-folding, then we may
3503 * have some more checking to do. A case-folding file
3504 * system either supports mixed case-sensitivity
3505 * access or is completely case-insensitive. Note
3506 * that the file system is always case-preserving.
3507 *
3508 * In mixed sensitivity mode case sensitive behavior
3509 * is the default. FIGNORECASE must be used to
3510 * explicitly request case insensitive behavior.
3511 *
3512 * If the source and target names provided differ only
3513 * by case (e.g., a request to rename 'tim' to 'Tim'),
3514 * we will treat this as a special case in the
3515 * case-insensitive mode: as long as the source name
3516 * is an exact match, we will allow this to proceed as
3517 * a name-change request.
3518 */
3519 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3520 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3521 flags & FIGNORECASE)) &&
3522 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3523 &error) == 0) {
3524 /*
3525 * case preserving rename request, require exact
3526 * name matches
3527 */
3528 zflg |= ZCIEXACT;
3529 zflg &= ~ZCILOOK;
3530 }
3531 }
3532
3533 /*
3534 * If the source and destination directories are the same, we should
3535 * grab the z_name_lock of that directory only once.
3536 */
3537 if (sdzp == tdzp) {
3538 zflg |= ZHAVELOCK;
3539 rw_enter(&sdzp->z_name_lock, RW_READER);
3540 }
3541
3542 if (cmp < 0) {
3543 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3544 ZEXISTS | zflg, NULL, NULL);
3545 terr = zfs_dirent_lock(&tdl,
3546 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3547 } else {
3548 terr = zfs_dirent_lock(&tdl,
3549 tdzp, tnm, &tzp, zflg, NULL, NULL);
3550 serr = zfs_dirent_lock(&sdl,
3551 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3552 NULL, NULL);
3553 }
3554
3555 if (serr) {
3556 /*
3557 * Source entry invalid or not there.
3558 */
3559 if (!terr) {
3560 zfs_dirent_unlock(tdl);
3561 if (tzp)
3562 iput(ZTOI(tzp));
3563 }
3564
3565 if (sdzp == tdzp)
3566 rw_exit(&sdzp->z_name_lock);
3567
3568 if (strcmp(snm, "..") == 0)
3569 serr = EINVAL;
3570 ZFS_EXIT(zfsvfs);
3571 return (serr);
3572 }
3573 if (terr) {
3574 zfs_dirent_unlock(sdl);
3575 iput(ZTOI(szp));
3576
3577 if (sdzp == tdzp)
3578 rw_exit(&sdzp->z_name_lock);
3579
3580 if (strcmp(tnm, "..") == 0)
3581 terr = EINVAL;
3582 ZFS_EXIT(zfsvfs);
3583 return (terr);
3584 }
3585
3586 /*
3587 * Must have write access at the source to remove the old entry
3588 * and write access at the target to create the new entry.
3589 * Note that if target and source are the same, this can be
3590 * done in a single check.
3591 */
3592
3593 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3594 goto out;
3595
3596 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3597 /*
3598 * Check to make sure rename is valid.
3599 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3600 */
3601 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3602 goto out;
3603 }
3604
3605 /*
3606 * Does target exist?
3607 */
3608 if (tzp) {
3609 /*
3610 * Source and target must be the same type.
3611 */
3612 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3613 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3614 error = SET_ERROR(ENOTDIR);
3615 goto out;
3616 }
3617 } else {
3618 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3619 error = SET_ERROR(EISDIR);
3620 goto out;
3621 }
3622 }
3623 /*
3624 * POSIX dictates that when the source and target
3625 * entries refer to the same file object, rename
3626 * must do nothing and exit without error.
3627 */
3628 if (szp->z_id == tzp->z_id) {
3629 error = 0;
3630 goto out;
3631 }
3632 }
3633
3634 tx = dmu_tx_create(zfsvfs->z_os);
3635 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3636 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3637 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3638 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3639 if (sdzp != tdzp) {
3640 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3641 zfs_sa_upgrade_txholds(tx, tdzp);
3642 }
3643 if (tzp) {
3644 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3645 zfs_sa_upgrade_txholds(tx, tzp);
3646 }
3647
3648 zfs_sa_upgrade_txholds(tx, szp);
3649 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3650 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3651 if (error) {
3652 if (zl != NULL)
3653 zfs_rename_unlock(&zl);
3654 zfs_dirent_unlock(sdl);
3655 zfs_dirent_unlock(tdl);
3656
3657 if (sdzp == tdzp)
3658 rw_exit(&sdzp->z_name_lock);
3659
3660 if (error == ERESTART) {
3661 waited = B_TRUE;
3662 dmu_tx_wait(tx);
3663 dmu_tx_abort(tx);
3664 iput(ZTOI(szp));
3665 if (tzp)
3666 iput(ZTOI(tzp));
3667 goto top;
3668 }
3669 dmu_tx_abort(tx);
3670 iput(ZTOI(szp));
3671 if (tzp)
3672 iput(ZTOI(tzp));
3673 ZFS_EXIT(zfsvfs);
3674 return (error);
3675 }
3676
3677 if (tzp) /* Attempt to remove the existing target */
3678 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3679
3680 if (error == 0) {
3681 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3682 if (error == 0) {
3683 szp->z_pflags |= ZFS_AV_MODIFIED;
3684
3685 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3686 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3687 ASSERT0(error);
3688
3689 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3690 if (error == 0) {
3691 zfs_log_rename(zilog, tx, TX_RENAME |
3692 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3693 sdl->dl_name, tdzp, tdl->dl_name, szp);
3694 } else {
3695 /*
3696 * At this point, we have successfully created
3697 * the target name, but have failed to remove
3698 * the source name. Since the create was done
3699 * with the ZRENAMING flag, there are
3700 * complications; for one, the link count is
3701 * wrong. The easiest way to deal with this
3702 * is to remove the newly created target, and
3703 * return the original error. This must
3704 * succeed; fortunately, it is very unlikely to
3705 * fail, since we just created it.
3706 */
3707 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3708 ZRENAMING, NULL), ==, 0);
3709 }
3710 } else {
3711 /*
3712 * If we had removed the existing target, the subsequent
3713 * call to zfs_link_create() to add back the same entry,
3714 * but with the new dnode (szp), should not fail.
3715 */
3716 ASSERT(tzp == NULL);
3717 }
3718 }
3719
3720 dmu_tx_commit(tx);
3721 out:
3722 if (zl != NULL)
3723 zfs_rename_unlock(&zl);
3724
3725 zfs_dirent_unlock(sdl);
3726 zfs_dirent_unlock(tdl);
3727
3728 zfs_inode_update(sdzp);
3729 if (sdzp == tdzp)
3730 rw_exit(&sdzp->z_name_lock);
3731
3732 if (sdzp != tdzp)
3733 zfs_inode_update(tdzp);
3734
3735 zfs_inode_update(szp);
3736 iput(ZTOI(szp));
3737 if (tzp) {
3738 zfs_inode_update(tzp);
3739 iput(ZTOI(tzp));
3740 }
3741
3742 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3743 zil_commit(zilog, 0);
3744
3745 ZFS_EXIT(zfsvfs);
3746 return (error);
3747 }
3748
3749 /*
3750 * Insert the indicated symbolic reference entry into the directory.
3751 *
3752 * IN: dip - Directory to contain new symbolic link.
3753 * name - Name for new symlink entry.
3754 * vap - Attributes of new entry.
3755 * link - Target path of new symlink.
3756 *
3757 * cr - credentials of caller.
3758 * flags - case flags
3759 *
3760 * RETURN: 0 on success, error code on failure.
3761 *
3762 * Timestamps:
3763 * dip - ctime|mtime updated
3764 */
3765 /*ARGSUSED*/
3766 int
3767 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3768 struct inode **ipp, cred_t *cr, int flags)
3769 {
3770 znode_t *zp, *dzp = ITOZ(dip);
3771 zfs_dirlock_t *dl;
3772 dmu_tx_t *tx;
3773 zfsvfs_t *zfsvfs = ITOZSB(dip);
3774 zilog_t *zilog;
3775 uint64_t len = strlen(link);
3776 int error;
3777 int zflg = ZNEW;
3778 zfs_acl_ids_t acl_ids;
3779 boolean_t fuid_dirtied;
3780 uint64_t txtype = TX_SYMLINK;
3781 boolean_t waited = B_FALSE;
3782
3783 ASSERT(S_ISLNK(vap->va_mode));
3784
3785 if (name == NULL)
3786 return (SET_ERROR(EINVAL));
3787
3788 ZFS_ENTER(zfsvfs);
3789 ZFS_VERIFY_ZP(dzp);
3790 zilog = zfsvfs->z_log;
3791
3792 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3793 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3794 ZFS_EXIT(zfsvfs);
3795 return (SET_ERROR(EILSEQ));
3796 }
3797 if (flags & FIGNORECASE)
3798 zflg |= ZCILOOK;
3799
3800 if (len > MAXPATHLEN) {
3801 ZFS_EXIT(zfsvfs);
3802 return (SET_ERROR(ENAMETOOLONG));
3803 }
3804
3805 if ((error = zfs_acl_ids_create(dzp, 0,
3806 vap, cr, NULL, &acl_ids)) != 0) {
3807 ZFS_EXIT(zfsvfs);
3808 return (error);
3809 }
3810 top:
3811 *ipp = NULL;
3812
3813 /*
3814 * Attempt to lock directory; fail if entry already exists.
3815 */
3816 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3817 if (error) {
3818 zfs_acl_ids_free(&acl_ids);
3819 ZFS_EXIT(zfsvfs);
3820 return (error);
3821 }
3822
3823 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3824 zfs_acl_ids_free(&acl_ids);
3825 zfs_dirent_unlock(dl);
3826 ZFS_EXIT(zfsvfs);
3827 return (error);
3828 }
3829
3830 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3831 zfs_acl_ids_free(&acl_ids);
3832 zfs_dirent_unlock(dl);
3833 ZFS_EXIT(zfsvfs);
3834 return (SET_ERROR(EDQUOT));
3835 }
3836 tx = dmu_tx_create(zfsvfs->z_os);
3837 fuid_dirtied = zfsvfs->z_fuid_dirty;
3838 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3839 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3840 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3841 ZFS_SA_BASE_ATTR_SIZE + len);
3842 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3843 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3844 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3845 acl_ids.z_aclp->z_acl_bytes);
3846 }
3847 if (fuid_dirtied)
3848 zfs_fuid_txhold(zfsvfs, tx);
3849 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3850 if (error) {
3851 zfs_dirent_unlock(dl);
3852 if (error == ERESTART) {
3853 waited = B_TRUE;
3854 dmu_tx_wait(tx);
3855 dmu_tx_abort(tx);
3856 goto top;
3857 }
3858 zfs_acl_ids_free(&acl_ids);
3859 dmu_tx_abort(tx);
3860 ZFS_EXIT(zfsvfs);
3861 return (error);
3862 }
3863
3864 /*
3865 * Create a new object for the symlink.
3866 * For version 4 ZPL datasets the symlink will be an SA attribute.
3867 */
3868 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3869
3870 if (fuid_dirtied)
3871 zfs_fuid_sync(zfsvfs, tx);
3872
3873 mutex_enter(&zp->z_lock);
3874 if (zp->z_is_sa)
3875 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3876 link, len, tx);
3877 else
3878 zfs_sa_symlink(zp, link, len, tx);
3879 mutex_exit(&zp->z_lock);
3880
3881 zp->z_size = len;
3882 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3883 &zp->z_size, sizeof (zp->z_size), tx);
3884 /*
3885 * Insert the new object into the directory.
3886 */
3887 error = zfs_link_create(dl, zp, tx, ZNEW);
3888 if (error != 0) {
3889 zfs_znode_delete(zp, tx);
3890 remove_inode_hash(ZTOI(zp));
3891 } else {
3892 if (flags & FIGNORECASE)
3893 txtype |= TX_CI;
3894 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3895
3896 zfs_inode_update(dzp);
3897 zfs_inode_update(zp);
3898 }
3899
3900 zfs_acl_ids_free(&acl_ids);
3901
3902 dmu_tx_commit(tx);
3903
3904 zfs_dirent_unlock(dl);
3905
3906 if (error == 0) {
3907 *ipp = ZTOI(zp);
3908
3909 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3910 zil_commit(zilog, 0);
3911 } else {
3912 iput(ZTOI(zp));
3913 }
3914
3915 ZFS_EXIT(zfsvfs);
3916 return (error);
3917 }
3918
3919 /*
3920 * Return, in the buffer contained in the provided uio structure,
3921 * the symbolic path referred to by ip.
3922 *
3923 * IN: ip - inode of symbolic link
3924 * uio - structure to contain the link path.
3925 * cr - credentials of caller.
3926 *
3927 * RETURN: 0 if success
3928 * error code if failure
3929 *
3930 * Timestamps:
3931 * ip - atime updated
3932 */
3933 /* ARGSUSED */
3934 int
3935 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3936 {
3937 znode_t *zp = ITOZ(ip);
3938 zfsvfs_t *zfsvfs = ITOZSB(ip);
3939 int error;
3940
3941 ZFS_ENTER(zfsvfs);
3942 ZFS_VERIFY_ZP(zp);
3943
3944 mutex_enter(&zp->z_lock);
3945 if (zp->z_is_sa)
3946 error = sa_lookup_uio(zp->z_sa_hdl,
3947 SA_ZPL_SYMLINK(zfsvfs), uio);
3948 else
3949 error = zfs_sa_readlink(zp, uio);
3950 mutex_exit(&zp->z_lock);
3951
3952 ZFS_EXIT(zfsvfs);
3953 return (error);
3954 }
3955
3956 /*
3957 * Insert a new entry into directory tdip referencing sip.
3958 *
3959 * IN: tdip - Directory to contain new entry.
3960 * sip - inode of new entry.
3961 * name - name of new entry.
3962 * cr - credentials of caller.
3963 *
3964 * RETURN: 0 if success
3965 * error code if failure
3966 *
3967 * Timestamps:
3968 * tdip - ctime|mtime updated
3969 * sip - ctime updated
3970 */
3971 /* ARGSUSED */
3972 int
3973 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
3974 int flags)
3975 {
3976 znode_t *dzp = ITOZ(tdip);
3977 znode_t *tzp, *szp;
3978 zfsvfs_t *zfsvfs = ITOZSB(tdip);
3979 zilog_t *zilog;
3980 zfs_dirlock_t *dl;
3981 dmu_tx_t *tx;
3982 int error;
3983 int zf = ZNEW;
3984 uint64_t parent;
3985 uid_t owner;
3986 boolean_t waited = B_FALSE;
3987 boolean_t is_tmpfile = 0;
3988 uint64_t txg;
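	/*
	 * An O_TMPFILE inode has no name yet: i_nlink is 0 and the VFS
	 * marks it I_LINKABLE so that linkat(2) with AT_EMPTY_PATH may
	 * give it its first directory entry. Such a file sits in
	 * z_unlinkedobj until linked, hence the special handling below.
	 */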
3989 #ifdef HAVE_TMPFILE
3990 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
3991 #endif
3992 ASSERT(S_ISDIR(tdip->i_mode));
3993
3994 if (name == NULL)
3995 return (SET_ERROR(EINVAL));
3996
3997 ZFS_ENTER(zfsvfs);
3998 ZFS_VERIFY_ZP(dzp);
3999 zilog = zfsvfs->z_log;
4000
4001 /*
4002 * POSIX dictates that we return EPERM here.
4003 * Better choices include ENOTSUP or EISDIR.
4004 */
4005 if (S_ISDIR(sip->i_mode)) {
4006 ZFS_EXIT(zfsvfs);
4007 return (SET_ERROR(EPERM));
4008 }
4009
4010 szp = ITOZ(sip);
4011 ZFS_VERIFY_ZP(szp);
4012
4013 /*
4014 * We check i_sb because snapshots and the ctldir must have different
4015 * super blocks.
4016 */
4017 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
4018 ZFS_EXIT(zfsvfs);
4019 return (SET_ERROR(EXDEV));
4020 }
4021
4022 /* Prevent links to .zfs/shares files */
4023
4024 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4025 &parent, sizeof (uint64_t))) != 0) {
4026 ZFS_EXIT(zfsvfs);
4027 return (error);
4028 }
4029 if (parent == zfsvfs->z_shares_dir) {
4030 ZFS_EXIT(zfsvfs);
4031 return (SET_ERROR(EPERM));
4032 }
4033
4034 if (zfsvfs->z_utf8 && u8_validate(name,
4035 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4036 ZFS_EXIT(zfsvfs);
4037 return (SET_ERROR(EILSEQ));
4038 }
4039 if (flags & FIGNORECASE)
4040 zf |= ZCILOOK;
4041
4042 /*
4043 * We do not support links between attributes and non-attributes
4044 * because of the potential security risk of creating links
4045 * into "normal" file space in order to circumvent restrictions
4046 * imposed in attribute space.
4047 */
4048 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4049 ZFS_EXIT(zfsvfs);
4050 return (SET_ERROR(EINVAL));
4051 }
4052
4053 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4054 cr, ZFS_OWNER);
4055 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4056 ZFS_EXIT(zfsvfs);
4057 return (SET_ERROR(EPERM));
4058 }
4059
4060 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4061 ZFS_EXIT(zfsvfs);
4062 return (error);
4063 }
4064
4065 top:
4066 /*
4067 * Attempt to lock directory; fail if entry already exists.
4068 */
4069 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4070 if (error) {
4071 ZFS_EXIT(zfsvfs);
4072 return (error);
4073 }
4074
4075 tx = dmu_tx_create(zfsvfs->z_os);
4076 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4077 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4078 if (is_tmpfile)
4079 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4080
4081 zfs_sa_upgrade_txholds(tx, szp);
4082 zfs_sa_upgrade_txholds(tx, dzp);
4083 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4084 if (error) {
4085 zfs_dirent_unlock(dl);
4086 if (error == ERESTART) {
4087 waited = B_TRUE;
4088 dmu_tx_wait(tx);
4089 dmu_tx_abort(tx);
4090 goto top;
4091 }
4092 dmu_tx_abort(tx);
4093 ZFS_EXIT(zfsvfs);
4094 return (error);
4095 }
4096 /* unmark z_unlinked so zfs_link_create will not reject */
4097 if (is_tmpfile)
4098 szp->z_unlinked = 0;
4099 error = zfs_link_create(dl, szp, tx, 0);
4100
4101 if (error == 0) {
4102 uint64_t txtype = TX_LINK;
4103 /*
4104 * A tmpfile is created in z_unlinkedobj, so remove it from there.
4105 * Also, we don't log to the ZIL, because all previous file
4106 * operations on the tmpfile are ignored by the ZIL. Instead we
4107 * always wait for the txg to sync to make sure all previous
4108 * operations are safe on stable storage.
4109 */
4110 if (is_tmpfile) {
4111 VERIFY(zap_remove_int(zfsvfs->z_os,
4112 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
4113 } else {
4114 if (flags & FIGNORECASE)
4115 txtype |= TX_CI;
4116 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4117 }
4118 } else if (is_tmpfile) {
4119 /* restore z_unlinked since linking failed */
4120 szp->z_unlinked = 1;
4121 }
4122 txg = dmu_tx_get_txg(tx);
4123 dmu_tx_commit(tx);
4124
4125 zfs_dirent_unlock(dl);
4126
4127 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4128 zil_commit(zilog, 0);
4129
4130 if (is_tmpfile)
4131 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
4132
4133 zfs_inode_update(dzp);
4134 zfs_inode_update(szp);
4135 ZFS_EXIT(zfsvfs);
4136 return (error);
4137 }
4138
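/*
 * Commit callback registered by zfs_putpage() below: runs once the
 * pushed page is on stable storage, clearing any error state on the
 * page and ending writeback so the VM may reuse it.
 */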
4139 static void
4140 zfs_putpage_commit_cb(void *arg)
4141 {
4142 struct page *pp = arg;
4143
4144 ClearPageError(pp);
4145 end_page_writeback(pp);
4146 }
4147
4148 /*
4149 * Push a page out to disk, once the page is on stable storage the
4150 * registered commit callback will be run as notification of completion.
4151 *
4152 * IN: ip - page mapped for inode.
4153 * pp - page to push (page is locked)
4154 * wbc - writeback control data
4155 *
4156 * RETURN: 0 if success
4157 * error code if failure
4158 *
4159 * Timestamps:
4160 * ip - ctime|mtime updated
4161 */
4162 /* ARGSUSED */
4163 int
4164 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
4165 {
4166 znode_t *zp = ITOZ(ip);
4167 zfsvfs_t *zfsvfs = ITOZSB(ip);
4168 loff_t offset;
4169 loff_t pgoff;
4170 unsigned int pglen;
4171 rl_t *rl;
4172 dmu_tx_t *tx;
4173 caddr_t va;
4174 int err = 0;
4175 uint64_t mtime[2], ctime[2];
4176 sa_bulk_attr_t bulk[3];
4177 int cnt = 0;
4178 struct address_space *mapping;
4179
4180 ZFS_ENTER(zfsvfs);
4181 ZFS_VERIFY_ZP(zp);
4182
4183 ASSERT(PageLocked(pp));
4184
4185 pgoff = page_offset(pp); /* Page byte-offset in file */
4186 offset = i_size_read(ip); /* File length in bytes */
4187 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4188 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
4189
4190 /* Page is beyond end of file */
4191 if (pgoff >= offset) {
4192 unlock_page(pp);
4193 ZFS_EXIT(zfsvfs);
4194 return (0);
4195 }
4196
4197 /* Truncate page length to end of file */
4198 if (pgoff + pglen > offset)
4199 pglen = offset - pgoff;
4200
4201 #if 0
4202 /*
4203 * FIXME: Allow mmap writes past its quota. The correct fix
4204 * is to register a page_mkwrite() handler to count the page
4205 * against its quota when it is about to be dirtied.
4206 */
4207 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4208 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4209 err = EDQUOT;
4210 }
4211 #endif
4212
4213 /*
4214 * The ordering here is critical and must adhere to the following
4215 * rules in order to avoid deadlocking in either zfs_read() or
4216 * zfs_free_range() due to a lock inversion.
4217 *
4218 * 1) The page must be unlocked prior to acquiring the range lock.
4219 * This is critical because zfs_read() calls find_lock_page()
4220 * which may block on the page lock while holding the range lock.
4221 *
4222 * 2) Before setting or clearing write back on a page the range lock
4223 * must be held in order to prevent a lock inversion with the
4224 * zfs_free_range() function.
4225 *
4226 * This presents a problem because upon entering this function the
4227 * page lock is already held. To safely acquire the range lock the
4228 * page lock must be dropped. This creates a window where another
4229 * process could truncate, invalidate, dirty, or write out the page.
4230 *
4231 * Therefore, after successfully reacquiring the range and page locks
4232 * the current page state is checked. In the common case everything
4233 * will be as is expected and it can be written out. However, if
4234 * the page state has changed it must be handled accordingly.
4235 */
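/*
 * Re-dirty the page and drop its lock so the range lock can be taken
 * first, per rule (1) above. If the page is still dirty and unchanged
 * once both locks are held, the wbc->pages_skipped adjustment below
 * undoes this accounting and the page is written out.
 */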
4236 mapping = pp->mapping;
4237 redirty_page_for_writepage(wbc, pp);
4238 unlock_page(pp);
4239
4240 rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
4241 lock_page(pp);
4242
4243 /* Page mapping changed or it is no longer dirty; we're done */
4244 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4245 unlock_page(pp);
4246 zfs_range_unlock(rl);
4247 ZFS_EXIT(zfsvfs);
4248 return (0);
4249 }
4250
4251 /* Another process started writeback on this page; block if required */
4252 if (PageWriteback(pp)) {
4253 unlock_page(pp);
4254 zfs_range_unlock(rl);
4255
4256 if (wbc->sync_mode != WB_SYNC_NONE)
4257 wait_on_page_writeback(pp);
4258
4259 ZFS_EXIT(zfsvfs);
4260 return (0);
4261 }
4262
4263 /* Clear the dirty flag now that the required locks are held */
4264 if (!clear_page_dirty_for_io(pp)) {
4265 unlock_page(pp);
4266 zfs_range_unlock(rl);
4267 ZFS_EXIT(zfsvfs);
4268 return (0);
4269 }
4270
4271 /*
4272 * Counterpart for redirty_page_for_writepage() above. This page
4273 * was in fact not skipped and should not be counted as if it were.
4274 */
4275 wbc->pages_skipped--;
4276 set_page_writeback(pp);
4277 unlock_page(pp);
4278
4279 tx = dmu_tx_create(zfsvfs->z_os);
4280 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
4281 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4282 zfs_sa_upgrade_txholds(tx, zp);
4283
4284 err = dmu_tx_assign(tx, TXG_NOWAIT);
4285 if (err != 0) {
4286 if (err == ERESTART)
4287 dmu_tx_wait(tx);
4288
4289 dmu_tx_abort(tx);
4290 __set_page_dirty_nobuffers(pp);
4291 ClearPageError(pp);
4292 end_page_writeback(pp);
4293 zfs_range_unlock(rl);
4294 ZFS_EXIT(zfsvfs);
4295 return (err);
4296 }
4297
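/* Map the page and copy its contents into the DMU for this file range */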
4298 va = kmap(pp);
4299 ASSERT3U(pglen, <=, PAGE_SIZE);
4300 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
4301 kunmap(pp);
4302
4303 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4304 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4305 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4306 &zp->z_pflags, 8);
4307
4308 /* Preserve the mtime and ctime provided by the inode */
4309 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4310 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4311 zp->z_atime_dirty = 0;
4312 zp->z_seq++;
4313
4314 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4315
4316 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
4317 zfs_putpage_commit_cb, pp);
4318 dmu_tx_commit(tx);
4319
4320 zfs_range_unlock(rl);
4321
4322 if (wbc->sync_mode != WB_SYNC_NONE) {
4323 /*
4324 * Note that this is rarely called under writepages(), because
4325 * writepages() normally handles the entire commit for
4326 * performance reasons.
4327 */
4328 zil_commit(zfsvfs->z_log, zp->z_id);
4329 }
4330
4331 ZFS_EXIT(zfsvfs);
4332 return (err);
4333 }
4334
4335 /*
4336 * Update the system attributes when the inode has been dirtied. For the
4337 * moment we only update the mode, atime, mtime, and ctime.
4338 */
4339 int
4340 zfs_dirty_inode(struct inode *ip, int flags)
4341 {
4342 znode_t *zp = ITOZ(ip);
4343 zfsvfs_t *zfsvfs = ITOZSB(ip);
4344 dmu_tx_t *tx;
4345 uint64_t mode, atime[2], mtime[2], ctime[2];
4346 sa_bulk_attr_t bulk[4];
4347 int error = 0;
4348 int cnt = 0;
4349
4350 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
4351 return (0);
4352
4353 ZFS_ENTER(zfsvfs);
4354 ZFS_VERIFY_ZP(zp);
4355
4356 #ifdef I_DIRTY_TIME
4357 /*
4358 * This is the lazytime semantic introduced in Linux 4.0.
4359 * This flag will only be set by update_time() when lazytime is enabled.
4360 * (Note: I_DIRTY_SYNC will also be set when lazytime is not in effect.)
4361 * Fortunately mtime and ctime are managed within ZFS itself, so we
4362 * only need to dirty atime.
4363 */
4364 if (flags == I_DIRTY_TIME) {
4365 zp->z_atime_dirty = 1;
4366 goto out;
4367 }
4368 #endif
4369
4370 tx = dmu_tx_create(zfsvfs->z_os);
4371
4372 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4373 zfs_sa_upgrade_txholds(tx, zp);
4374
4375 error = dmu_tx_assign(tx, TXG_WAIT);
4376 if (error) {
4377 dmu_tx_abort(tx);
4378 goto out;
4379 }
4380
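/*
 * With the tx assigned, copy the generic inode's mode and timestamps
 * into the SA bulk array under z_lock and update the znode's system
 * attributes in one sa_bulk_update() call.
 */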
4381 mutex_enter(&zp->z_lock);
4382 zp->z_atime_dirty = 0;
4383
4384 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4385 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4386 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4387 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4388
4389 /* Preserve the mode, atime, mtime, and ctime provided by the inode */
4390 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4391 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4392 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4393 mode = ip->i_mode;
4394
4395 zp->z_mode = mode;
4396
4397 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4398 mutex_exit(&zp->z_lock);
4399
4400 dmu_tx_commit(tx);
4401 out:
4402 ZFS_EXIT(zfsvfs);
4403 return (error);
4404 }
4405
4406 /*ARGSUSED*/
4407 void
4408 zfs_inactive(struct inode *ip)
4409 {
4410 znode_t *zp = ITOZ(ip);
4411 zfsvfs_t *zfsvfs = ITOZSB(ip);
4412 uint64_t atime[2];
4413 int error;
4414 int need_unlock = 0;
4415
4416 /* Only read lock if we haven't already write locked, e.g. rollback */
4417 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
4418 need_unlock = 1;
4419 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4420 }
4421 if (zp->z_sa_hdl == NULL) {
4422 if (need_unlock)
4423 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4424 return;
4425 }
4426
4427 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4428 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4429
4430 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4431 zfs_sa_upgrade_txholds(tx, zp);
4432 error = dmu_tx_assign(tx, TXG_WAIT);
4433 if (error) {
4434 dmu_tx_abort(tx);
4435 } else {
4436 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4437 mutex_enter(&zp->z_lock);
4438 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4439 (void *)&atime, sizeof (atime), tx);
4440 zp->z_atime_dirty = 0;
4441 mutex_exit(&zp->z_lock);
4442 dmu_tx_commit(tx);
4443 }
4444 }
4445
4446 zfs_zinactive(zp);
4447 if (need_unlock)
4448 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4449 }
4450
4451 /*
4452 * Bounds-check the seek operation.
4453 *
4454 * IN: ip - inode seeking within
4455 * ooff - old file offset
4456 * noffp - pointer to new file offset
4457 * ct - caller context
4458 *
4459 * RETURN: 0 if success
4460 * EINVAL if new offset invalid
4461 */
4462 /* ARGSUSED */
4463 int
4464 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4465 {
4466 if (S_ISDIR(ip->i_mode))
4467 return (0);
4468 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4469 }
4470
4471 /*
4472 * Fill pages with data from the disk.
4473 */
4474 static int
4475 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4476 {
4477 znode_t *zp = ITOZ(ip);
4478 zfsvfs_t *zfsvfs = ITOZSB(ip);
4479 objset_t *os;
4480 struct page *cur_pp;
4481 u_offset_t io_off, total;
4482 size_t io_len;
4483 loff_t i_size;
4484 unsigned page_idx;
4485 int err;
4486
4487 os = zfsvfs->z_os;
4488 io_len = nr_pages << PAGE_SHIFT;
4489 i_size = i_size_read(ip);
4490 io_off = page_offset(pl[0]);
4491
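/* Clamp the I/O range so it does not extend past the end of the file */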
4492 if (io_off + io_len > i_size)
4493 io_len = i_size - io_off;
4494
4495 /*
4496 * Iterate over list of pages and read each page individually.
4497 */
4498 page_idx = 0;
4499 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4500 caddr_t va;
4501
4502 cur_pp = pl[page_idx++];
4503 va = kmap(cur_pp);
4504 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4505 DMU_READ_PREFETCH);
4506 kunmap(cur_pp);
4507 if (err) {
4508 /* convert checksum errors into IO errors */
4509 if (err == ECKSUM)
4510 err = SET_ERROR(EIO);
4511 return (err);
4512 }
4513 }
4514
4515 return (0);
4516 }
4517
4518 /*
4519 * Uses zfs_fillpage to read data from the file and fill the pages.
4520 *
4521 * IN: ip - inode of file to get data from.
4522 * pl - list of pages to read
4523 * nr_pages - number of pages to read
4524 *
4525 * RETURN: 0 on success, error code on failure.
4526 *
4527 * Timestamps:
4528 * vp - atime updated
4529 */
4530 /* ARGSUSED */
4531 int
4532 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4533 {
4534 znode_t *zp = ITOZ(ip);
4535 zfsvfs_t *zfsvfs = ITOZSB(ip);
4536 int err;
4537
4538 if (pl == NULL)
4539 return (0);
4540
4541 ZFS_ENTER(zfsvfs);
4542 ZFS_VERIFY_ZP(zp);
4543
4544 err = zfs_fillpage(ip, pl, nr_pages);
4545
4546 ZFS_EXIT(zfsvfs);
4547 return (err);
4548 }
4549
4550 /*
4551 * Check ZFS specific permissions to memory map a section of a file.
4552 *
4553 * IN: ip - inode of the file to mmap
4554 * off - file offset
4555 * addrp - start address in memory region
4556 * len - length of memory region
4557 * vm_flags- address flags
4558 *
4559 * RETURN: 0 if success
4560 * error code if failure
4561 */
4562 /*ARGSUSED*/
4563 int
4564 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4565 unsigned long vm_flags)
4566 {
4567 znode_t *zp = ITOZ(ip);
4568 zfsvfs_t *zfsvfs = ITOZSB(ip);
4569
4570 ZFS_ENTER(zfsvfs);
4571 ZFS_VERIFY_ZP(zp);
4572
4573 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4574 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4575 ZFS_EXIT(zfsvfs);
4576 return (SET_ERROR(EPERM));
4577 }
4578
4579 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4580 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4581 ZFS_EXIT(zfsvfs);
4582 return (SET_ERROR(EACCES));
4583 }
4584
4585 if (off < 0 || len > MAXOFFSET_T - off) {
4586 ZFS_EXIT(zfsvfs);
4587 return (SET_ERROR(ENXIO));
4588 }
4589
4590 ZFS_EXIT(zfsvfs);
4591 return (0);
4592 }
4593
4594 /*
4595 * convoff - converts the given lock data (l_start, l_whence) to be
4596 * relative to the given whence.
4597 */
4598 int
4599 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4600 {
4601 vattr_t vap;
4602 int error;
4603
4604 if ((lckdat->l_whence == 2) || (whence == 2)) {
4605 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
4606 return (error);
4607 }
4608
4609 switch (lckdat->l_whence) {
4610 case 1:
4611 lckdat->l_start += offset;
4612 break;
4613 case 2:
4614 lckdat->l_start += vap.va_size;
4615 /* FALLTHRU */
4616 case 0:
4617 break;
4618 default:
4619 return (SET_ERROR(EINVAL));
4620 }
4621
4622 if (lckdat->l_start < 0)
4623 return (SET_ERROR(EINVAL));
4624
4625 switch (whence) {
4626 case 1:
4627 lckdat->l_start -= offset;
4628 break;
4629 case 2:
4630 lckdat->l_start -= vap.va_size;
4631 /* FALLTHRU */
4632 case 0:
4633 break;
4634 default:
4635 return (SET_ERROR(EINVAL));
4636 }
4637
4638 lckdat->l_whence = (short)whence;
4639 return (0);
4640 }
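/*
 * For example, a lock given relative to the current offset
 * (l_whence == 1, l_start == 100) converted to an absolute position
 * (whence == 0) with a file offset of 500 yields l_start == 600 and
 * l_whence == 0.
 */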
4641
4642 /*
4643 * Free or allocate space in a file. Currently, this function only
4644 * supports the `F_FREESP' command. However, this command is somewhat
4645 * misnamed, as its functionality includes the ability to allocate as
4646 * well as free space.
4647 *
4648 * IN: ip - inode of file to free data in.
4649 * cmd - action to take (only F_FREESP supported).
4650 * bfp - section of file to free/alloc.
4651 * flag - current file open mode flags.
4652 * offset - current file offset.
4653 * cr - credentials of caller [UNUSED].
4654 *
4655 * RETURN: 0 on success, error code on failure.
4656 *
4657 * Timestamps:
4658 * ip - ctime|mtime updated
4659 */
4660 /* ARGSUSED */
4661 int
4662 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4663 offset_t offset, cred_t *cr)
4664 {
4665 znode_t *zp = ITOZ(ip);
4666 zfsvfs_t *zfsvfs = ITOZSB(ip);
4667 uint64_t off, len;
4668 int error;
4669
4670 ZFS_ENTER(zfsvfs);
4671 ZFS_VERIFY_ZP(zp);
4672
4673 if (cmd != F_FREESP) {
4674 ZFS_EXIT(zfsvfs);
4675 return (SET_ERROR(EINVAL));
4676 }
4677
4678 /*
4679 * Callers might not be able to detect properly that we are read-only,
4680 * so check it explicitly here.
4681 */
4682 if (zfs_is_readonly(zfsvfs)) {
4683 ZFS_EXIT(zfsvfs);
4684 return (SET_ERROR(EROFS));
4685 }
4686
4687 if ((error = convoff(ip, bfp, 0, offset))) {
4688 ZFS_EXIT(zfsvfs);
4689 return (error);
4690 }
4691
4692 if (bfp->l_len < 0) {
4693 ZFS_EXIT(zfsvfs);
4694 return (SET_ERROR(EINVAL));
4695 }
4696
4697 /*
4698 * Permissions aren't checked on Solaris because on this OS
4699 * zfs_space() can only be called with an opened file handle.
4700 * On Linux we can get here through truncate_range() which
4701 * operates directly on inodes, so we need to check access rights.
4702 */
4703 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4704 ZFS_EXIT(zfsvfs);
4705 return (error);
4706 }
4707
4708 off = bfp->l_start;
4709 len = bfp->l_len; /* 0 means from off to end of file */
4710
4711 error = zfs_freesp(zp, off, len, flag, TRUE);
4712
4713 ZFS_EXIT(zfsvfs);
4714 return (error);
4715 }
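/*
 * A minimal sketch of the calling convention, assuming the caller wants
 * to free (hole-punch) bytes [4096, 8192) of the file behind ip:
 *
 *	flock64_t bf;
 *
 *	bf.l_whence = 0;		// l_start is absolute
 *	bf.l_start = 4096;
 *	bf.l_len = 4096;		// 0 would mean "to end of file"
 *	error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, CRED());
 */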
4716
4717 /*ARGSUSED*/
4718 int
4719 zfs_fid(struct inode *ip, fid_t *fidp)
4720 {
4721 znode_t *zp = ITOZ(ip);
4722 zfsvfs_t *zfsvfs = ITOZSB(ip);
4723 uint32_t gen;
4724 uint64_t gen64;
4725 uint64_t object = zp->z_id;
4726 zfid_short_t *zfid;
4727 int size, i, error;
4728
4729 ZFS_ENTER(zfsvfs);
4730 ZFS_VERIFY_ZP(zp);
4731
4732 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4733 &gen64, sizeof (uint64_t))) != 0) {
4734 ZFS_EXIT(zfsvfs);
4735 return (error);
4736 }
4737
4738 gen = (uint32_t)gen64;
4739
4740 size = SHORT_FID_LEN;
4741
4742 zfid = (zfid_short_t *)fidp;
4743
4744 zfid->zf_len = size;
4745
4746 for (i = 0; i < sizeof (zfid->zf_object); i++)
4747 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4748
4749 /* Must have a non-zero generation number to distinguish from .zfs */
4750 if (gen == 0)
4751 gen = 1;
4752 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4753 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4754
4755 ZFS_EXIT(zfsvfs);
4756 return (0);
4757 }
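/*
 * The loops above pack the object number and generation into the fid
 * byte-by-byte in little-endian order. For example, object 0xA1B2C3 is
 * encoded as zf_object[] = { 0xC3, 0xB2, 0xA1, 0, ... } with the
 * remaining bytes zero.
 */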
4758
4759 /*ARGSUSED*/
4760 int
4761 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4762 {
4763 znode_t *zp = ITOZ(ip);
4764 zfsvfs_t *zfsvfs = ITOZSB(ip);
4765 int error;
4766 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4767
4768 ZFS_ENTER(zfsvfs);
4769 ZFS_VERIFY_ZP(zp);
4770 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4771 ZFS_EXIT(zfsvfs);
4772
4773 return (error);
4774 }
4775
4776 /*ARGSUSED*/
4777 int
4778 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4779 {
4780 znode_t *zp = ITOZ(ip);
4781 zfsvfs_t *zfsvfs = ITOZSB(ip);
4782 int error;
4783 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4784 zilog_t *zilog = zfsvfs->z_log;
4785
4786 ZFS_ENTER(zfsvfs);
4787 ZFS_VERIFY_ZP(zp);
4788
4789 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4790
4791 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4792 zil_commit(zilog, 0);
4793
4794 ZFS_EXIT(zfsvfs);
4795 return (error);
4796 }
4797
4798 #ifdef HAVE_UIO_ZEROCOPY
4799 /*
4800 * Tunable, both must be a power of 2.
4801 *
4802 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4803 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4804 * an arcbuf for a partial block read
4805 */
4806 int zcr_blksz_min = (1 << 10); /* 1K */
4807 int zcr_blksz_max = (1 << 17); /* 128K */
4808
4809 /*ARGSUSED*/
4810 static int
4811 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4812 {
4813 znode_t *zp = ITOZ(ip);
4814 zfsvfs_t *zfsvfs = ITOZSB(ip);
4815 int max_blksz = zfsvfs->z_max_blksz;
4816 uio_t *uio = &xuio->xu_uio;
4817 ssize_t size = uio->uio_resid;
4818 offset_t offset = uio->uio_loffset;
4819 int blksz;
4820 int fullblk, i;
4821 arc_buf_t *abuf;
4822 ssize_t maxsize;
4823 int preamble, postamble;
4824
4825 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4826 return (SET_ERROR(EINVAL));
4827
4828 ZFS_ENTER(zfsvfs);
4829 ZFS_VERIFY_ZP(zp);
4830 switch (ioflag) {
4831 case UIO_WRITE:
4832 /*
4833 * Loan out an arc_buf for write if the write size is at least
4834 * max_blksz, and the file's block size is also max_blksz.
4835 */
4836 blksz = max_blksz;
4837 if (size < blksz || zp->z_blksz != blksz) {
4838 ZFS_EXIT(zfsvfs);
4839 return (SET_ERROR(EINVAL));
4840 }
4841 /*
4842 * Caller requests buffers for write before knowing where the
4843 * write offset might be (e.g. NFS TCP write).
4844 */
4845 if (offset == -1) {
4846 preamble = 0;
4847 } else {
4848 preamble = P2PHASE(offset, blksz);
4849 if (preamble) {
4850 preamble = blksz - preamble;
4851 size -= preamble;
4852 }
4853 }
4854
4855 postamble = P2PHASE(size, blksz);
4856 size -= postamble;
4857
4858 fullblk = size / blksz;
4859 (void) dmu_xuio_init(xuio,
4860 (preamble != 0) + fullblk + (postamble != 0));
4861
4862 /*
4863 * Have to fix iov base/len for partial buffers. They
4864 * currently represent full arc_buf's.
4865 */
4866 if (preamble) {
4867 /* data begins in the middle of the arc_buf */
4868 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4869 blksz);
4870 ASSERT(abuf);
4871 (void) dmu_xuio_add(xuio, abuf,
4872 blksz - preamble, preamble);
4873 }
4874
4875 for (i = 0; i < fullblk; i++) {
4876 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4877 blksz);
4878 ASSERT(abuf);
4879 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4880 }
4881
4882 if (postamble) {
4883 /* data ends in the middle of the arc_buf */
4884 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4885 blksz);
4886 ASSERT(abuf);
4887 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4888 }
4889 break;
4890 case UIO_READ:
4891 /*
4892 * Loan out an arc_buf for read if the read size is larger than
4893 * the current file block size. Block alignment is not
4894 * considered. A partial arc_buf may be loaned out for the read.
4895 */
4896 blksz = zp->z_blksz;
4897 if (blksz < zcr_blksz_min)
4898 blksz = zcr_blksz_min;
4899 if (blksz > zcr_blksz_max)
4900 blksz = zcr_blksz_max;
4901 /* avoid the complexity of loaning buffers larger than max_blksz */
4902 if (blksz > max_blksz) {
4903 ZFS_EXIT(zfsvfs);
4904 return (SET_ERROR(EINVAL));
4905 }
4906
4907 maxsize = zp->z_size - uio->uio_loffset;
4908 if (size > maxsize)
4909 size = maxsize;
4910
4911 if (size < blksz) {
4912 ZFS_EXIT(zfsvfs);
4913 return (SET_ERROR(EINVAL));
4914 }
4915 break;
4916 default:
4917 ZFS_EXIT(zfsvfs);
4918 return (SET_ERROR(EINVAL));
4919 }
4920
4921 uio->uio_extflg = UIO_XUIO;
4922 XUIO_XUZC_RW(xuio) = ioflag;
4923 ZFS_EXIT(zfsvfs);
4924 return (0);
4925 }
4926
4927 /*ARGSUSED*/
4928 static int
4929 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4930 {
4931 int i;
4932 arc_buf_t *abuf;
4933 int ioflag = XUIO_XUZC_RW(xuio);
4934
4935 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4936
4937 i = dmu_xuio_cnt(xuio);
4938 while (i-- > 0) {
4939 abuf = dmu_xuio_arcbuf(xuio, i);
4940 /*
4941 * if abuf == NULL, it must be a write buffer
4942 * that has been returned in zfs_write().
4943 */
4944 if (abuf)
4945 dmu_return_arcbuf(abuf);
4946 ASSERT(abuf || ioflag == UIO_WRITE);
4947 }
4948
4949 dmu_xuio_fini(xuio);
4950 return (0);
4951 }
4952 #endif /* HAVE_UIO_ZEROCOPY */
4953
4954 #if defined(_KERNEL) && defined(HAVE_SPL)
4955 EXPORT_SYMBOL(zfs_open);
4956 EXPORT_SYMBOL(zfs_close);
4957 EXPORT_SYMBOL(zfs_read);
4958 EXPORT_SYMBOL(zfs_write);
4959 EXPORT_SYMBOL(zfs_access);
4960 EXPORT_SYMBOL(zfs_lookup);
4961 EXPORT_SYMBOL(zfs_create);
4962 EXPORT_SYMBOL(zfs_tmpfile);
4963 EXPORT_SYMBOL(zfs_remove);
4964 EXPORT_SYMBOL(zfs_mkdir);
4965 EXPORT_SYMBOL(zfs_rmdir);
4966 EXPORT_SYMBOL(zfs_readdir);
4967 EXPORT_SYMBOL(zfs_fsync);
4968 EXPORT_SYMBOL(zfs_getattr);
4969 EXPORT_SYMBOL(zfs_getattr_fast);
4970 EXPORT_SYMBOL(zfs_setattr);
4971 EXPORT_SYMBOL(zfs_rename);
4972 EXPORT_SYMBOL(zfs_symlink);
4973 EXPORT_SYMBOL(zfs_readlink);
4974 EXPORT_SYMBOL(zfs_link);
4975 EXPORT_SYMBOL(zfs_inactive);
4976 EXPORT_SYMBOL(zfs_space);
4977 EXPORT_SYMBOL(zfs_fid);
4978 EXPORT_SYMBOL(zfs_getsecattr);
4979 EXPORT_SYMBOL(zfs_setsecattr);
4980 EXPORT_SYMBOL(zfs_getpage);
4981 EXPORT_SYMBOL(zfs_putpage);
4982 EXPORT_SYMBOL(zfs_dirty_inode);
4983 EXPORT_SYMBOL(zfs_map);
4984
4985 /* CSTYLED */
4986 module_param(zfs_delete_blocks, ulong, 0644);
4987 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
4988 module_param(zfs_read_chunk_size, long, 0644);
4989 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
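/*
 * Both parameters are registered with mode 0644, so they can typically
 * be read and tuned at runtime through sysfs, e.g.:
 *
 *	echo 1048576 > /sys/module/zfs/parameters/zfs_read_chunk_size
 */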
4990 #endif