/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/mode.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *      This is done, while avoiding races, using ZFS_ENTER(zfsvfs).
 *      A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
 *      must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
 *      can return EIO from the calling function.
 *
 *  (2) iput() should always be the last thing except for zil_commit()
 *      (if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *      First, if it's the last reference, the vnode/znode
 *      can be freed, so the zp may point to freed memory. Second, the last
 *      reference will call zfs_zinactive(), which may induce a lot of work --
 *      pushing cached pages (which acquires range locks) and syncing out
 *      cached atime changes. Third, zfs_zinactive() may require a new tx,
 *      which could deadlock the system if you were already holding one.
 *      If you must call iput() within a tx then use zfs_iput_async().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *      as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *      dmu_tx_assign(). This is critical because we don't want to block
 *      while holding locks.
 *
 *      If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
 *      reduces lock contention and CPU usage when we must wait (note that if
 *      throughput is constrained by the storage, nearly every transaction
 *      must wait).
 *
 *      Note, in particular, that if a lock is sometimes acquired before
 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
 *      to use a non-blocking assign can deadlock the system. The scenario:
 *
 *      Thread A has grabbed a lock before calling dmu_tx_assign().
 *      Thread B is in an already-assigned tx, and blocks for this lock.
 *      Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *      forever, because the previous txg can't quiesce until B's tx commits.
 *
 *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *      then drop all locks, call dmu_tx_wait(), and try again. On subsequent
 *      calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
 *      to indicate that this operation has already called dmu_tx_wait().
 *      This will ensure that we don't retry forever, waiting a short bit
 *      each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *      before dropping locks. This ensures that the ordering of events
 *      in the intent log matches the order in which they actually occurred.
 *      During ZIL replay the zfs_log_* functions will update the sequence
 *      number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *      regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *      to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *      ZFS_ENTER(zfsvfs);              // exit if unmounted
 * top:
 *      zfs_dirent_lock(&dl, ...)       // lock directory entry (may igrab())
 *      rw_enter(...);                  // grab any other locks you need
 *      tx = dmu_tx_create(...);        // get DMU tx
 *      dmu_tx_hold_*();                // hold each object you might modify
 *      error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *      if (error) {
 *              rw_exit(...);           // drop locks
 *              zfs_dirent_unlock(dl);  // unlock directory entry
 *              iput(...);              // release held vnodes
 *              if (error == ERESTART) {
 *                      waited = B_TRUE;
 *                      dmu_tx_wait(tx);
 *                      dmu_tx_abort(tx);
 *                      goto top;
 *              }
 *              dmu_tx_abort(tx);       // abort DMU tx
 *              ZFS_EXIT(zfsvfs);       // finished in zfs
 *              return (error);         // really out of space
 *      }
 *      error = do_real_work();         // do whatever this VOP does
 *      if (error == 0)
 *              zfs_log_*(...);         // on success, make ZIL entry
 *      dmu_tx_commit(tx);              // commit DMU tx -- error or not
 *      rw_exit(...);                   // drop locks
 *      zfs_dirent_unlock(dl);          // unlock directory entry
 *      iput(...);                      // release held vnodes
 *      zil_commit(zilog, foid);        // synchronous when necessary
 *      ZFS_EXIT(zfsvfs);               // finished in zfs
 *      return (error);                 // done, report error
 */

/*
 * Virus scanning is unsupported. It would be possible to add a hook
 * here to perform the required virus scan. This could be done
 * entirely in the kernel or potentially as an upcall to invoke a
 * scanning utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
        return (0);
}

/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        /* Honor ZFS_APPENDONLY file attribute */
        if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
            ((flag & O_APPEND) == 0)) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EPERM));
        }

        /* Virus scan eligible files on open */
        if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
            !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
                if (zfs_vscan(ip, cr, 0) != 0) {
                        ZFS_EXIT(zfsvfs);
                        return (SET_ERROR(EACCES));
                }
        }

        /* Keep a count of the synchronous opens in the znode */
        if (flag & O_SYNC)
                atomic_inc_32(&zp->z_sync_cnt);

        ZFS_EXIT(zfsvfs);
        return (0);
}

/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        /* Decrement the synchronous opens in the znode */
        if (flag & O_SYNC)
                atomic_dec_32(&zp->z_sync_cnt);

        if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
            !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
                VERIFY(zfs_vscan(ip, cr, 1) == 0);

        ZFS_EXIT(zfsvfs);
        return (0);
}

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
{
        znode_t *zp = ITOZ(ip);
        uint64_t noff = (uint64_t)*off; /* new offset */
        uint64_t file_sz;
        int error;
        boolean_t hole;

        file_sz = zp->z_size;
        if (noff >= file_sz) {
                return (SET_ERROR(ENXIO));
        }

        if (cmd == SEEK_HOLE)
                hole = B_TRUE;
        else
                hole = B_FALSE;

        error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

        if (error == ESRCH)
                return (SET_ERROR(ENXIO));

        /* file was dirty, so fall back to using generic logic */
        if (error == EBUSY) {
                if (hole)
                        *off = file_sz;

                return (0);
        }

        /*
         * We could find a hole that begins after the logical end-of-file,
         * because dmu_offset_next() only works on whole blocks. If the
         * EOF falls mid-block, then indicate that the "virtual hole"
         * at the end of the file begins at the logical EOF, rather than
         * at the end of the last block.
         */
        if (noff > file_sz) {
                ASSERT(hole);
                noff = file_sz;
        }

        if (noff < *off)
                return (error);
        *off = noff;
        return (error);
}

int
zfs_holey(struct inode *ip, int cmd, loff_t *off)
{
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);
        int error;

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        error = zfs_holey_common(ip, cmd, off);

        ZFS_EXIT(zfsvfs);
        return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */

#if defined(_KERNEL)
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Write: If we find a memory mapped page, we write to *both*
 * the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
        struct address_space *mp = ip->i_mapping;
        struct page *pp;
        uint64_t nbytes;
        int64_t off;
        void *pb;

        off = start & (PAGE_SIZE-1);
        for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
                nbytes = MIN(PAGE_SIZE - off, len);

                pp = find_lock_page(mp, start >> PAGE_SHIFT);
                if (pp) {
                        if (mapping_writably_mapped(mp))
                                flush_dcache_page(pp);

                        pb = kmap(pp);
                        (void) dmu_read(os, oid, start+off, nbytes, pb+off,
                            DMU_READ_PREFETCH);
                        kunmap(pp);

                        if (mapping_writably_mapped(mp))
                                flush_dcache_page(pp);

                        mark_page_accessed(pp);
                        SetPageUptodate(pp);
                        ClearPageError(pp);
                        unlock_page(pp);
                        put_page(pp);
                }

                len -= nbytes;
                off = 0;
        }
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Read: We "read" preferentially from memory mapped pages,
 * otherwise we fall back to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 * the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
        struct address_space *mp = ip->i_mapping;
        struct page *pp;
        znode_t *zp = ITOZ(ip);
        int64_t start, off;
        uint64_t bytes;
        int len = nbytes;
        int error = 0;
        void *pb;

        start = uio->uio_loffset;
        off = start & (PAGE_SIZE-1);
        for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
                bytes = MIN(PAGE_SIZE - off, len);

                pp = find_lock_page(mp, start >> PAGE_SHIFT);
                if (pp) {
                        ASSERT(PageUptodate(pp));
                        unlock_page(pp);

                        pb = kmap(pp);
                        error = uiomove(pb + off, bytes, UIO_READ, uio);
                        kunmap(pp);

                        if (mapping_writably_mapped(mp))
                                flush_dcache_page(pp);

                        mark_page_accessed(pp);
                        put_page(pp);
                } else {
                        error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                            uio, bytes);
                }

                len -= bytes;
                off = 0;
                if (error)
                        break;
        }
        return (error);
}
#endif /* _KERNEL */

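/*
 * zfs_read_chunk_size caps the bytes moved per dmu_read_uio_dbuf() call in
 * zfs_read() below; zfs_delete_blocks is the block-count threshold above
 * which zfs_remove() treats a file as too big to free fully in the
 * unlinking transaction and defers the bulk of the free.
 */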
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

/*
 * Read bytes from specified file into supplied buffer.
 *
 *      IN:     ip      - inode of file to be read from.
 *              uio     - structure supplying read location, range info,
 *                        and return buffer.
 *              ioflag  - FSYNC flags; used to provide FRSYNC semantics.
 *                        O_DIRECT flag; used to bypass page cache.
 *              cr      - credentials of caller.
 *
 *      OUT:    uio     - updated offset and range, buffer filled.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 * Side Effects:
 *      inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);
        ssize_t n, nbytes;
        int error = 0;
        rl_t *rl;
#ifdef HAVE_UIO_ZEROCOPY
        xuio_t *xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        if (zp->z_pflags & ZFS_AV_QUARANTINED) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EACCES));
        }

        /*
         * Validate file offset
         */
        if (uio->uio_loffset < (offset_t)0) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EINVAL));
        }

        /*
         * Fasttrack empty reads
         */
        if (uio->uio_resid == 0) {
                ZFS_EXIT(zfsvfs);
                return (0);
        }

        /*
         * If we're in FRSYNC mode, sync out this znode before reading it.
         * Only do this for non-snapshots.
         */
        if (zfsvfs->z_log &&
            (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
                zil_commit(zfsvfs->z_log, zp->z_id);

        /*
         * Lock the range against changes.
         */
        rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
            RL_READER);

        /*
         * If we are reading past end-of-file we can skip
         * to the end; but we might still need to set atime.
         */
        if (uio->uio_loffset >= zp->z_size) {
                error = 0;
                goto out;
        }

        ASSERT(uio->uio_loffset < zp->z_size);
        n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
        if ((uio->uio_extflg == UIO_XUIO) &&
            (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
                int nblk;
                int blksz = zp->z_blksz;
                uint64_t offset = uio->uio_loffset;

                xuio = (xuio_t *)uio;
                if ((ISP2(blksz))) {
                        nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
                            blksz)) / blksz;
                } else {
                        ASSERT(offset + n <= blksz);
                        nblk = 1;
                }
                (void) dmu_xuio_init(xuio, nblk);

                if (vn_has_cached_data(ip)) {
                        /*
                         * For simplicity, we always allocate a full buffer
                         * even if we only expect to read a portion of a block.
                         */
                        while (--nblk >= 0) {
                                (void) dmu_xuio_add(xuio,
                                    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                                    blksz), 0, blksz);
                        }
                }
        }
#endif /* HAVE_UIO_ZEROCOPY */

        while (n > 0) {
                nbytes = MIN(n, zfs_read_chunk_size -
                    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

                if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
                        error = mappedread(ip, nbytes, uio);
                } else {
                        error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                            uio, nbytes);
                }

                if (error) {
                        /* convert checksum errors into IO errors */
                        if (error == ECKSUM)
                                error = SET_ERROR(EIO);
                        break;
                }

                n -= nbytes;
        }
out:
        zfs_range_unlock(rl);

        ZFS_EXIT(zfsvfs);
        return (error);
}

/*
 * Write the bytes to a file.
 *
 *      IN:     ip      - inode of file to be written to.
 *              uio     - structure supplying write location, range info,
 *                        and data buffer.
 *              ioflag  - FAPPEND flag set if in append mode.
 *                        O_DIRECT flag; used to bypass page cache.
 *              cr      - credentials of caller.
 *
 *      OUT:    uio     - updated offset and range.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * Timestamps:
 *      ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        rlim64_t limit = uio->uio_limit;
        ssize_t start_resid = uio->uio_resid;
        ssize_t tx_bytes;
        uint64_t end_size;
        dmu_tx_t *tx;
        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        zilog_t *zilog;
        offset_t woff;
        ssize_t n, nbytes;
        rl_t *rl;
        int max_blksz = zfsvfs->z_max_blksz;
        int error = 0;
        arc_buf_t *abuf;
        const iovec_t *aiov = NULL;
        xuio_t *xuio = NULL;
        int write_eof;
        int count = 0;
        sa_bulk_attr_t bulk[4];
        uint64_t mtime[2], ctime[2];
        uint32_t uid;
#ifdef HAVE_UIO_ZEROCOPY
        int i_iov = 0;
        const iovec_t *iovp = uio->uio_iov;
        ASSERTV(int iovcnt = uio->uio_iovcnt);
#endif

        /*
         * Fasttrack empty write
         */
        n = start_resid;
        if (n == 0)
                return (0);

        if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
                limit = MAXOFFSET_T;

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
            &zp->z_size, 8);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
            &zp->z_pflags, 8);

        /*
         * Callers might not be able to detect properly that we are read-only,
         * so check it explicitly here.
         */
        if (zfs_is_readonly(zfsvfs)) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EROFS));
        }

        /*
         * If immutable or not appending then return EPERM
         */
        if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
            ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
            (uio->uio_loffset < zp->z_size))) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EPERM));
        }

        zilog = zfsvfs->z_log;

        /*
         * Validate file offset
         */
        woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
        if (woff < 0) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EINVAL));
        }

        /*
         * Pre-fault the pages to ensure slow (e.g. NFS) pages
         * don't hold up txg.
         * Skip this if uio contains loaned arc_buf.
         */
#ifdef HAVE_UIO_ZEROCOPY
        if ((uio->uio_extflg == UIO_XUIO) &&
            (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
                xuio = (xuio_t *)uio;
        else
#endif
                uio_prefaultpages(MIN(n, max_blksz), uio);

        /*
         * If in append mode, set the io offset pointer to eof.
         */
        if (ioflag & FAPPEND) {
                /*
                 * Obtain an appending range lock to guarantee file append
                 * semantics. We reset the write offset once we have the lock.
                 */
                rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
                woff = rl->r_off;
                if (rl->r_len == UINT64_MAX) {
                        /*
                         * We overlocked the file because this write will cause
                         * the file block size to increase.
                         * Note that z_size cannot change with this lock held.
                         */
                        woff = zp->z_size;
                }
                uio->uio_loffset = woff;
        } else {
                /*
                 * Note that if the file block size will change as a result of
                 * this write, then this range lock will lock the entire file
                 * so that we can re-write the block safely.
                 */
                rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);
        }

        if (woff >= limit) {
                zfs_range_unlock(rl);
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EFBIG));
        }

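        /*
         * Clamp the request so it ends at the rlimit; the second test is
         * the overflow-safe form of (woff + n) > limit.
         */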
        if ((woff + n) > limit || woff > (limit - n))
                n = limit - woff;

        /* Will this write extend the file length? */
        write_eof = (woff + n > zp->z_size);

        end_size = MAX(zp->z_size, woff + n);

        /*
         * Write the file in reasonable size chunks. Each chunk is written
         * in a separate transaction; this keeps the intent log records small
         * and allows us to do more fine-grained space accounting.
         */
        while (n > 0) {
                abuf = NULL;
                woff = uio->uio_loffset;
                if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
                    KUID_TO_SUID(ip->i_uid)) ||
                    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
                    KGID_TO_SGID(ip->i_gid)) ||
                    (zp->z_projid != ZFS_DEFAULT_PROJID &&
                    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
                    zp->z_projid))) {
                        if (abuf != NULL)
                                dmu_return_arcbuf(abuf);
                        error = SET_ERROR(EDQUOT);
                        break;
                }

                if (xuio && abuf == NULL) {
#ifdef HAVE_UIO_ZEROCOPY
                        ASSERT(i_iov < iovcnt);
                        ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
                        aiov = &iovp[i_iov];
                        abuf = dmu_xuio_arcbuf(xuio, i_iov);
                        dmu_xuio_clear(xuio, i_iov);
                        ASSERT((aiov->iov_base == abuf->b_data) ||
                            ((char *)aiov->iov_base - (char *)abuf->b_data +
                            aiov->iov_len == arc_buf_size(abuf)));
                        i_iov++;
#endif
                } else if (abuf == NULL && n >= max_blksz &&
                    woff >= zp->z_size &&
                    P2PHASE(woff, max_blksz) == 0 &&
                    zp->z_blksz == max_blksz) {
                        /*
                         * This write covers a full block. "Borrow" a buffer
                         * from the dmu so that we can fill it before we enter
                         * a transaction. This avoids the possibility of
                         * holding up the transaction if the data copy hangs
                         * up on a pagefault (e.g., from an NFS server mapping).
                         */
                        size_t cbytes;

                        abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                            max_blksz);
                        ASSERT(abuf != NULL);
                        ASSERT(arc_buf_size(abuf) == max_blksz);
                        if ((error = uiocopy(abuf->b_data, max_blksz,
                            UIO_WRITE, uio, &cbytes))) {
                                dmu_return_arcbuf(abuf);
                                break;
                        }
                        ASSERT(cbytes == max_blksz);
                }

                /*
                 * Start a transaction.
                 */
                tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
                zfs_sa_upgrade_txholds(tx, zp);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        if (abuf != NULL)
                                dmu_return_arcbuf(abuf);
                        break;
                }

                /*
                 * If zfs_range_lock() over-locked we grow the blocksize
                 * and then reduce the lock range. This will only happen
                 * on the first iteration since zfs_range_reduce() will
                 * shrink down r_len to the appropriate size.
                 */
                if (rl->r_len == UINT64_MAX) {
                        uint64_t new_blksz;

                        if (zp->z_blksz > max_blksz) {
                                /*
                                 * File's blocksize is already larger than the
                                 * "recordsize" property. Only let it grow to
                                 * the next power of 2.
                                 */
                                ASSERT(!ISP2(zp->z_blksz));
                                new_blksz = MIN(end_size,
                                    1 << highbit64(zp->z_blksz));
                        } else {
                                new_blksz = MIN(end_size, max_blksz);
                        }
                        zfs_grow_blocksize(zp, new_blksz, tx);
                        zfs_range_reduce(rl, woff, n);
                }

                /*
                 * XXX - should we really limit each write to z_max_blksz?
                 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
                 */
                nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

                if (abuf == NULL) {
                        tx_bytes = uio->uio_resid;
                        error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                            uio, nbytes, tx);
                        tx_bytes -= uio->uio_resid;
                } else {
                        tx_bytes = nbytes;
                        ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
                        /*
                         * If this is not a full block write, but we are
                         * extending the file past EOF and this data starts
                         * block-aligned, use assign_arcbuf(). Otherwise,
                         * write via dmu_write().
                         */
                        if (tx_bytes < max_blksz && (!write_eof ||
                            aiov->iov_base != abuf->b_data)) {
                                ASSERT(xuio);
                                dmu_write(zfsvfs->z_os, zp->z_id, woff,
                                    /* cppcheck-suppress nullPointer */
                                    aiov->iov_len, aiov->iov_base, tx);
                                dmu_return_arcbuf(abuf);
                                xuio_stat_wbuf_copied();
                        } else {
                                ASSERT(xuio || tx_bytes == max_blksz);
                                dmu_assign_arcbuf_by_dbuf(
                                    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
                        }
                        ASSERT(tx_bytes <= uio->uio_resid);
                        uioskip(uio, tx_bytes);
                }
                if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
                        update_pages(ip, woff,
                            tx_bytes, zfsvfs->z_os, zp->z_id);
                }

                /*
                 * If we made no progress, we're done. If we made even
                 * partial progress, update the znode and ZIL accordingly.
                 */
                if (tx_bytes == 0) {
                        (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
                            (void *)&zp->z_size, sizeof (uint64_t), tx);
                        dmu_tx_commit(tx);
                        ASSERT(error != 0);
                        break;
                }

                /*
                 * Clear Set-UID/Set-GID bits on successful write if not
                 * privileged and at least one of the execute bits is set.
                 *
                 * It would be nice to do this after all writes have
                 * been done, but that would still expose the ISUID/ISGID
                 * to another app after the partial write is committed.
                 *
                 * Note: we don't call zfs_fuid_map_id() here because
                 * user 0 is not an ephemeral uid.
                 */
                mutex_enter(&zp->z_acl_lock);
                uid = KUID_TO_SUID(ip->i_uid);
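                /* S_IXUSR >> 3 is S_IXGRP; S_IXUSR >> 6 is S_IXOTH. */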
                if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
                    (S_IXUSR >> 6))) != 0 &&
                    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
                    secpolicy_vnode_setid_retain(cr,
                    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
                        uint64_t newmode;
                        zp->z_mode &= ~(S_ISUID | S_ISGID);
                        ip->i_mode = newmode = zp->z_mode;
                        (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
                            (void *)&newmode, sizeof (uint64_t), tx);
                }
                mutex_exit(&zp->z_acl_lock);

                zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

                /*
                 * Update the file size (z_size) if it has changed;
                 * account for possible concurrent updates.
                 */
                while ((end_size = zp->z_size) < uio->uio_loffset) {
                        (void) atomic_cas_64(&zp->z_size, end_size,
                            uio->uio_loffset);
                        ASSERT(error == 0);
                }
                /*
                 * If we are replaying and eof is non-zero then force
                 * the file size to the specified eof. Note, there's no
                 * concurrency during replay.
                 */
                if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
                        zp->z_size = zfsvfs->z_replay_eof;

                error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

                zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
                    NULL, NULL);
                dmu_tx_commit(tx);

                if (error != 0)
                        break;
                ASSERT(tx_bytes == nbytes);
                n -= nbytes;

                if (!xuio && n > 0)
                        uio_prefaultpages(MIN(n, max_blksz), uio);
        }

        zfs_inode_update(zp);
        zfs_range_unlock(rl);

        /*
         * If we're in replay mode, or we made no progress, return error.
         * Otherwise, it's at least a partial write, so it's successful.
         */
        if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
                ZFS_EXIT(zfsvfs);
                return (error);
        }

        if (ioflag & (FSYNC | FDSYNC) ||
            zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                zil_commit(zilog, zp->z_id);

        ZFS_EXIT(zfsvfs);
        return (0);
}

/*
 * Drop a reference on the passed inode asynchronously. This ensures
 * that the caller will never drop the last reference on an inode in
 * the current context. Doing so while holding open a tx could result
 * in a deadlock if iput_final() re-enters the filesystem code.
 */
void
zfs_iput_async(struct inode *ip)
{
        objset_t *os = ITOZSB(ip)->z_os;

        ASSERT(atomic_read(&ip->i_count) > 0);
        ASSERT(os != NULL);

        if (atomic_read(&ip->i_count) == 1)
                VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
                    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
        else
                iput(ip);
}

void
zfs_get_done(zgd_t *zgd, int error)
{
        znode_t *zp = zgd->zgd_private;

        if (zgd->zgd_db)
                dmu_buf_rele(zgd->zgd_db, zgd);

        zfs_range_unlock(zgd->zgd_rl);

        /*
         * Release the vnode asynchronously as we currently have the
         * txg stopped from syncing.
         */
        zfs_iput_async(ZTOI(zp));

        if (error == 0 && zgd->zgd_bp)
                zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

        kmem_free(zgd, sizeof (zgd_t));
}

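/*
 * Debug knob: when set, injects a single EIO into the indirect-write
 * path of zfs_get_data() below to exercise its error handling.
 */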
#ifdef DEBUG
static int zil_fault_io = 0;
#endif

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
        zfsvfs_t *zfsvfs = arg;
        objset_t *os = zfsvfs->z_os;
        znode_t *zp;
        uint64_t object = lr->lr_foid;
        uint64_t offset = lr->lr_offset;
        uint64_t size = lr->lr_length;
        dmu_buf_t *db;
        zgd_t *zgd;
        int error = 0;

        ASSERT3P(lwb, !=, NULL);
        ASSERT3P(zio, !=, NULL);
        ASSERT3U(size, !=, 0);

        /*
         * Nothing to do if the file has been removed
         */
        if (zfs_zget(zfsvfs, object, &zp) != 0)
                return (SET_ERROR(ENOENT));
        if (zp->z_unlinked) {
                /*
                 * Release the vnode asynchronously as we currently have the
                 * txg stopped from syncing.
                 */
                zfs_iput_async(ZTOI(zp));
                return (SET_ERROR(ENOENT));
        }

        zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
        zgd->zgd_lwb = lwb;
        zgd->zgd_private = zp;

        /*
         * Write records come in two flavors: immediate and indirect.
         * For small writes it's cheaper to store the data with the
         * log record (immediate); for large writes it's cheaper to
         * sync the data and get a pointer to it (indirect) so that
         * we don't have to write the data twice.
         */
        if (buf != NULL) { /* immediate write */
                zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,
                    RL_READER);
                /* test for truncation needs to be done while range locked */
                if (offset >= zp->z_size) {
                        error = SET_ERROR(ENOENT);
                } else {
                        error = dmu_read(os, object, offset, size, buf,
                            DMU_READ_NO_PREFETCH);
                }
                ASSERT(error == 0 || error == ENOENT);
        } else { /* indirect write */
                /*
                 * Have to lock the whole block to ensure when it's
                 * written out and its checksum is being calculated
                 * that no one can change the data. We need to re-check
                 * blocksize after we get the lock in case it's changed!
                 */
                for (;;) {
                        uint64_t blkoff;
                        size = zp->z_blksz;
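                        /*
                         * A non-power-of-2 block size means the file
                         * has a single block, so lock from offset 0.
                         */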
                        blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
                        offset -= blkoff;
                        zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
                            size, RL_READER);
                        if (zp->z_blksz == size)
                                break;
                        offset += blkoff;
                        zfs_range_unlock(zgd->zgd_rl);
                }
                /* test for truncation needs to be done while range locked */
                if (lr->lr_offset >= zp->z_size)
                        error = SET_ERROR(ENOENT);
#ifdef DEBUG
                if (zil_fault_io) {
                        error = SET_ERROR(EIO);
                        zil_fault_io = 0;
                }
#endif
                if (error == 0)
                        error = dmu_buf_hold(os, object, offset, zgd, &db,
                            DMU_READ_NO_PREFETCH);

                if (error == 0) {
                        blkptr_t *bp = &lr->lr_blkptr;

                        zgd->zgd_db = db;
                        zgd->zgd_bp = bp;

                        ASSERT(db->db_offset == offset);
                        ASSERT(db->db_size == size);

                        error = dmu_sync(zio, lr->lr_common.lrc_txg,
                            zfs_get_done, zgd);
                        ASSERT(error || lr->lr_length <= size);

                        /*
                         * On success, we need to wait for the write I/O
                         * initiated by dmu_sync() to complete before we can
                         * release this dbuf. We will finish everything up
                         * in the zfs_get_done() callback.
                         */
                        if (error == 0)
                                return (0);

                        if (error == EALREADY) {
                                lr->lr_common.lrc_txtype = TX_WRITE2;
                                /*
                                 * TX_WRITE2 relies on the data previously
                                 * written by the TX_WRITE that caused
                                 * EALREADY. We zero out the BP because
                                 * it is the old, currently-on-disk BP,
                                 * so there's no need to zio_flush() its
                                 * vdevs (flushing would needlessly hurt
                                 * performance, and doesn't work on
                                 * indirect vdevs).
                                 */
                                zgd->zgd_bp = NULL;
                                BP_ZERO(bp);
                                error = 0;
                        }
                }
        }

        zfs_get_done(zgd, error);

        return (error);
}

/*ARGSUSED*/
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);
        int error;

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zp);

        if (flag & V_ACE_MASK)
                error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
        else
                error = zfs_zaccess_rwx(zp, mode, flag, cr);

        ZFS_EXIT(zfsvfs);
        return (error);
}

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *      IN:     dip     - inode of directory to search.
 *              nm      - name of entry to lookup.
 *              flags   - LOOKUP_XATTR set if looking for an attribute.
 *              cr      - credentials of caller.
 *              direntflags - directory lookup flags
 *              realpnp - returned pathname.
 *
 *      OUT:    ipp     - inode of located entry, NULL if not found.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *      NA
 */
/* ARGSUSED */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
        znode_t *zdp = ITOZ(dip);
        zfsvfs_t *zfsvfs = ITOZSB(dip);
        int error = 0;

        /*
         * Fast path lookup; however, we must skip DNLC lookup
         * for case folding or normalizing lookups because the
         * DNLC code only stores the passed-in name. This means
         * creating 'a' and removing 'A' on a case insensitive
         * file system would work, but DNLC still thinks 'a'
         * exists and won't let you create it again on the next
         * pass through the fast path.
         */
        if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

                if (!S_ISDIR(dip->i_mode)) {
                        return (SET_ERROR(ENOTDIR));
                } else if (zdp->z_sa_hdl == NULL) {
                        return (SET_ERROR(EIO));
                }

                if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
                        error = zfs_fastaccesschk_execute(zdp, cr);
                        if (!error) {
                                *ipp = dip;
                                igrab(*ipp);
                                return (0);
                        }
                        return (error);
#ifdef HAVE_DNLC
                } else if (!zdp->z_zfsvfs->z_norm &&
                    (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {

                        vnode_t *tvp = dnlc_lookup(dvp, nm);

                        if (tvp) {
                                error = zfs_fastaccesschk_execute(zdp, cr);
                                if (error) {
                                        iput(tvp);
                                        return (error);
                                }
                                if (tvp == DNLC_NO_VNODE) {
                                        iput(tvp);
                                        return (SET_ERROR(ENOENT));
                                } else {
                                        *vpp = tvp;
                                        return (specvp_check(vpp, cr));
                                }
                        }
#endif /* HAVE_DNLC */
                }
        }

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(zdp);

        *ipp = NULL;

        if (flags & LOOKUP_XATTR) {
                /*
                 * We don't allow recursive attributes.
                 * Maybe someday we will.
                 */
                if (zdp->z_pflags & ZFS_XATTR) {
                        ZFS_EXIT(zfsvfs);
                        return (SET_ERROR(EINVAL));
                }

                if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
                        ZFS_EXIT(zfsvfs);
                        return (error);
                }

                /*
                 * Do we have permission to get into attribute directory?
                 */
                if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
                    B_FALSE, cr))) {
                        iput(*ipp);
                        *ipp = NULL;
                }

                ZFS_EXIT(zfsvfs);
                return (error);
        }

        if (!S_ISDIR(dip->i_mode)) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(ENOTDIR));
        }

        /*
         * Check accessibility of directory.
         */
        if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
                ZFS_EXIT(zfsvfs);
                return (error);
        }

        if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
            NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EILSEQ));
        }

        error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
        if ((error == 0) && (*ipp))
                zfs_inode_update(ITOZ(*ipp));

        ZFS_EXIT(zfsvfs);
        return (error);
}

/*
 * Attempt to create a new entry in a directory. If the entry
 * already exists, truncate the file if permissible, else return
 * an error. Return the ip of the created or trunc'd file.
 *
 *      IN:     dip     - inode of directory to put new file entry in.
 *              name    - name of new file entry.
 *              vap     - attributes of new file.
 *              excl    - flag indicating exclusive or non-exclusive mode.
 *              mode    - mode to open file with.
 *              cr      - credentials of caller.
 *              flag    - large file flag [UNUSED].
 *              vsecp   - ACL to be set
 *
 *      OUT:    ipp     - inode of created or trunc'd entry.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *      dip - ctime|mtime updated if new entry created
 *      ip - ctime|mtime always, atime if new
 */

/* ARGSUSED */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
        znode_t *zp, *dzp = ITOZ(dip);
        zfsvfs_t *zfsvfs = ITOZSB(dip);
        zilog_t *zilog;
        objset_t *os;
        zfs_dirlock_t *dl;
        dmu_tx_t *tx;
        int error;
        uid_t uid;
        gid_t gid;
        zfs_acl_ids_t acl_ids;
        boolean_t fuid_dirtied;
        boolean_t have_acl = B_FALSE;
        boolean_t waited = B_FALSE;

        /*
         * If we have an ephemeral id, ACL, or XVATTR then
         * make sure file system is at proper version
         */
        gid = crgetgid(cr);
        uid = crgetuid(cr);

        if (zfsvfs->z_use_fuids == B_FALSE &&
            (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
                return (SET_ERROR(EINVAL));

        if (name == NULL)
                return (SET_ERROR(EINVAL));

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(dzp);
        os = zfsvfs->z_os;
        zilog = zfsvfs->z_log;

        if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
            NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
                ZFS_EXIT(zfsvfs);
                return (SET_ERROR(EILSEQ));
        }

        if (vap->va_mask & ATTR_XVATTR) {
                if ((error = secpolicy_xvattr((xvattr_t *)vap,
                    crgetuid(cr), cr, vap->va_mode)) != 0) {
                        ZFS_EXIT(zfsvfs);
                        return (error);
                }
        }

top:
        *ipp = NULL;
        if (*name == '\0') {
                /*
                 * Null component name refers to the directory itself.
                 */
                igrab(dip);
                zp = dzp;
                dl = NULL;
                error = 0;
        } else {
                /* possible igrab(zp) */
                int zflg = 0;

                if (flag & FIGNORECASE)
                        zflg |= ZCILOOK;

                error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
                    NULL, NULL);
                if (error) {
                        if (have_acl)
                                zfs_acl_ids_free(&acl_ids);
                        if (strcmp(name, "..") == 0)
                                error = SET_ERROR(EISDIR);
                        ZFS_EXIT(zfsvfs);
                        return (error);
                }
        }

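        /*
         * A NULL zp means zfs_dirent_lock() found no existing entry, so
         * create the file; otherwise handle the existing entry below.
         */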
        if (zp == NULL) {
                uint64_t txtype;
                uint64_t projid = ZFS_DEFAULT_PROJID;

                /*
                 * Create a new file object and update the directory
                 * to reference it.
                 */
                if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
                        if (have_acl)
                                zfs_acl_ids_free(&acl_ids);
                        goto out;
                }

                /*
                 * We only support the creation of regular files in
                 * extended attribute directories.
                 */
                if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
                        if (have_acl)
                                zfs_acl_ids_free(&acl_ids);
                        error = SET_ERROR(EINVAL);
                        goto out;
                }

                if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
                    cr, vsecp, &acl_ids)) != 0)
                        goto out;
                have_acl = B_TRUE;

                if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
                        projid = zfs_inherit_projid(dzp);
                if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
                        zfs_acl_ids_free(&acl_ids);
                        error = SET_ERROR(EDQUOT);
                        goto out;
                }

                tx = dmu_tx_create(os);

                dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
                    ZFS_SA_BASE_ATTR_SIZE);

                fuid_dirtied = zfsvfs->z_fuid_dirty;
                if (fuid_dirtied)
                        zfs_fuid_txhold(zfsvfs, tx);
                dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
                dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
                if (!zfsvfs->z_use_sa &&
                    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
                        dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
                            0, acl_ids.z_aclp->z_acl_bytes);
                }

                error = dmu_tx_assign(tx,
                    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
                if (error) {
                        zfs_dirent_unlock(dl);
                        if (error == ERESTART) {
                                waited = B_TRUE;
                                dmu_tx_wait(tx);
                                dmu_tx_abort(tx);
                                goto top;
                        }
                        zfs_acl_ids_free(&acl_ids);
                        dmu_tx_abort(tx);
                        ZFS_EXIT(zfsvfs);
                        return (error);
                }
                zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

                error = zfs_link_create(dl, zp, tx, ZNEW);
                if (error != 0) {
                        /*
                         * Since we failed to add the directory entry for it,
                         * delete the newly created dnode.
                         */
                        zfs_znode_delete(zp, tx);
                        remove_inode_hash(ZTOI(zp));
                        zfs_acl_ids_free(&acl_ids);
                        dmu_tx_commit(tx);
                        goto out;
                }

                if (fuid_dirtied)
                        zfs_fuid_sync(zfsvfs, tx);

                txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
                if (flag & FIGNORECASE)
                        txtype |= TX_CI;
                zfs_log_create(zilog, tx, txtype, dzp, zp, name,
                    vsecp, acl_ids.z_fuidp, vap);
                zfs_acl_ids_free(&acl_ids);
                dmu_tx_commit(tx);
        } else {
                int aflags = (flag & FAPPEND) ? V_APPEND : 0;

                if (have_acl)
                        zfs_acl_ids_free(&acl_ids);
                have_acl = B_FALSE;

                /*
                 * A directory entry already exists for this name.
                 */
                /*
                 * Can't truncate an existing file if in exclusive mode.
                 */
                if (excl) {
                        error = SET_ERROR(EEXIST);
                        goto out;
                }
                /*
                 * Can't open a directory for writing.
                 */
                if (S_ISDIR(ZTOI(zp)->i_mode)) {
                        error = SET_ERROR(EISDIR);
                        goto out;
                }
                /*
                 * Verify requested access to file.
                 */
                if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
                        goto out;
                }

                mutex_enter(&dzp->z_lock);
                dzp->z_seq++;
                mutex_exit(&dzp->z_lock);

                /*
                 * Truncate regular files if requested.
                 */
                if (S_ISREG(ZTOI(zp)->i_mode) &&
                    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
                        /* we can't hold any locks when calling zfs_freesp() */
                        if (dl) {
                                zfs_dirent_unlock(dl);
                                dl = NULL;
                        }
                        error = zfs_freesp(zp, 0, 0, mode, TRUE);
                }
        }
out:

        if (dl)
                zfs_dirent_unlock(dl);

        if (error) {
                if (zp)
                        iput(ZTOI(zp));
        } else {
                zfs_inode_update(dzp);
                zfs_inode_update(zp);
                *ipp = ZTOI(zp);
        }

        if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                zil_commit(zilog, 0);

        ZFS_EXIT(zfsvfs);
        return (error);
}

/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
        znode_t *zp = NULL, *dzp = ITOZ(dip);
        zfsvfs_t *zfsvfs = ITOZSB(dip);
        objset_t *os;
        dmu_tx_t *tx;
        int error;
        uid_t uid;
        gid_t gid;
        zfs_acl_ids_t acl_ids;
        uint64_t projid = ZFS_DEFAULT_PROJID;
        boolean_t fuid_dirtied;
        boolean_t have_acl = B_FALSE;
        boolean_t waited = B_FALSE;

        /*
         * If we have an ephemeral id, ACL, or XVATTR then
         * make sure file system is at proper version
         */
        gid = crgetgid(cr);
        uid = crgetuid(cr);

        if (zfsvfs->z_use_fuids == B_FALSE &&
            (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
                return (SET_ERROR(EINVAL));

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(dzp);
        os = zfsvfs->z_os;

        if (vap->va_mask & ATTR_XVATTR) {
                if ((error = secpolicy_xvattr((xvattr_t *)vap,
                    crgetuid(cr), cr, vap->va_mode)) != 0) {
                        ZFS_EXIT(zfsvfs);
                        return (error);
                }
        }

top:
        *ipp = NULL;

        /*
         * Create a new file object and update the directory
         * to reference it.
         */
        if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
                if (have_acl)
                        zfs_acl_ids_free(&acl_ids);
                goto out;
        }

        if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
            cr, vsecp, &acl_ids)) != 0)
                goto out;
        have_acl = B_TRUE;

        if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
                projid = zfs_inherit_projid(dzp);
        if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
                zfs_acl_ids_free(&acl_ids);
                error = SET_ERROR(EDQUOT);
                goto out;
        }

        tx = dmu_tx_create(os);

        dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
            ZFS_SA_BASE_ATTR_SIZE);
        dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

        fuid_dirtied = zfsvfs->z_fuid_dirty;
        if (fuid_dirtied)
                zfs_fuid_txhold(zfsvfs, tx);
        if (!zfsvfs->z_use_sa &&
            acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
                dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
                    0, acl_ids.z_aclp->z_acl_bytes);
        }
        error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
        if (error) {
                if (error == ERESTART) {
                        waited = B_TRUE;
                        dmu_tx_wait(tx);
                        dmu_tx_abort(tx);
                        goto top;
                }
                zfs_acl_ids_free(&acl_ids);
                dmu_tx_abort(tx);
                ZFS_EXIT(zfsvfs);
                return (error);
        }
        zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

        if (fuid_dirtied)
                zfs_fuid_sync(zfsvfs, tx);

        /* Add to unlinked set */
        zp->z_unlinked = 1;
        zfs_unlinked_add(zp, tx);
        zfs_acl_ids_free(&acl_ids);
        dmu_tx_commit(tx);
out:

        if (error) {
                if (zp)
                        iput(ZTOI(zp));
        } else {
                zfs_inode_update(dzp);
                zfs_inode_update(zp);
                *ipp = ZTOI(zp);
        }

        ZFS_EXIT(zfsvfs);
        return (error);
}

/*
 * Remove an entry from a directory.
 *
 *      IN:     dip     - inode of directory to remove entry from.
 *              name    - name of entry to remove.
 *              cr      - credentials of caller.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * Timestamps:
 *      dip - ctime|mtime
 *      ip - ctime (if nlink > 0)
 */

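/*
 * Used by zfs_remove() to clear a znode's SA_ZPL_XATTR reference when
 * its extended attribute directory is placed on the unlinked set.
 */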
uint64_t null_xattr = 0;

/*ARGSUSED*/
int
zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
{
        znode_t *zp, *dzp = ITOZ(dip);
        znode_t *xzp;
        struct inode *ip;
        zfsvfs_t *zfsvfs = ITOZSB(dip);
        zilog_t *zilog;
        uint64_t acl_obj, xattr_obj;
        uint64_t xattr_obj_unlinked = 0;
        uint64_t obj = 0;
        uint64_t links;
        zfs_dirlock_t *dl;
        dmu_tx_t *tx;
        boolean_t may_delete_now, delete_now = FALSE;
        boolean_t unlinked, toobig = FALSE;
        uint64_t txtype;
        pathname_t *realnmp = NULL;
        pathname_t realnm;
        int error;
        int zflg = ZEXISTS;
        boolean_t waited = B_FALSE;

        if (name == NULL)
                return (SET_ERROR(EINVAL));

        ZFS_ENTER(zfsvfs);
        ZFS_VERIFY_ZP(dzp);
        zilog = zfsvfs->z_log;

        if (flags & FIGNORECASE) {
                zflg |= ZCILOOK;
                pn_alloc(&realnm);
                realnmp = &realnm;
        }

top:
        xattr_obj = 0;
        xzp = NULL;
        /*
         * Attempt to lock directory; fail if entry doesn't exist.
         */
        if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
            NULL, realnmp))) {
                if (realnmp)
                        pn_free(realnmp);
                ZFS_EXIT(zfsvfs);
                return (error);
        }

        ip = ZTOI(zp);

        if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
                goto out;
        }

        /*
         * Need to use rmdir for removing directories.
         */
        if (S_ISDIR(ip->i_mode)) {
                error = SET_ERROR(EPERM);
                goto out;
        }

#ifdef HAVE_DNLC
        if (realnmp)
                dnlc_remove(dvp, realnmp->pn_buf);
        else
                dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

        mutex_enter(&zp->z_lock);
        may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
        mutex_exit(&zp->z_lock);

        /*
         * We may delete the znode now, or we may put it in the unlinked set;
         * it depends on whether we're the last link, and on whether there are
         * other holds on the inode. So we dmu_tx_hold() the right things to
         * allow for either case.
         */
        obj = zp->z_id;
        tx = dmu_tx_create(zfsvfs->z_os);
        dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
        dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
        zfs_sa_upgrade_txholds(tx, zp);
        zfs_sa_upgrade_txholds(tx, dzp);
        if (may_delete_now) {
                toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
                /* if the file is too big, only hold_free a token amount */
                dmu_tx_hold_free(tx, zp->z_id, 0,
                    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
        }

        /* are there any extended attributes? */
        error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
            &xattr_obj, sizeof (xattr_obj));
        if (error == 0 && xattr_obj) {
                error = zfs_zget(zfsvfs, xattr_obj, &xzp);
                ASSERT0(error);
                dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
                dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
        }

        mutex_enter(&zp->z_lock);
        if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
                dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
        mutex_exit(&zp->z_lock);

        /* charge as an update -- would be nice not to charge at all */
        dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

        /*
         * Mark this transaction as typically resulting in a net free of space
         */
        dmu_tx_mark_netfree(tx);

        error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
        if (error) {
                zfs_dirent_unlock(dl);
                if (error == ERESTART) {
                        waited = B_TRUE;
                        dmu_tx_wait(tx);
                        dmu_tx_abort(tx);
                        iput(ip);
                        if (xzp)
                                iput(ZTOI(xzp));
                        goto top;
                }
                if (realnmp)
                        pn_free(realnmp);
                dmu_tx_abort(tx);
                iput(ip);
                if (xzp)
                        iput(ZTOI(xzp));
                ZFS_EXIT(zfsvfs);
                return (error);
        }

        /*
         * Remove the directory entry.
         */
        error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

        if (error) {
                dmu_tx_commit(tx);
                goto out;
        }

        if (unlinked) {
                /*
                 * Hold z_lock so that we can make sure that the ACL obj
                 * hasn't changed. Could have been deleted due to
                 * zfs_sa_upgrade().
                 */
                mutex_enter(&zp->z_lock);
                (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
                    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
                delete_now = may_delete_now && !toobig &&
                    atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
                    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
                    acl_obj;
        }

        if (delete_now) {
                if (xattr_obj_unlinked) {
                        ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
                        mutex_enter(&xzp->z_lock);
                        xzp->z_unlinked = 1;
                        clear_nlink(ZTOI(xzp));
                        links = 0;
                        error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
                            &links, sizeof (links), tx);
                        ASSERT3U(error, ==, 0);
                        mutex_exit(&xzp->z_lock);
                        zfs_unlinked_add(xzp, tx);

                        if (zp->z_is_sa)
                                error = sa_remove(zp->z_sa_hdl,
                                    SA_ZPL_XATTR(zfsvfs), tx);
                        else
                                error = sa_update(zp->z_sa_hdl,
                                    SA_ZPL_XATTR(zfsvfs), &null_xattr,
                                    sizeof (uint64_t), tx);
                        ASSERT0(error);
                }
                /*
                 * Add to the unlinked set because a new reference could be
                 * taken concurrently resulting in a deferred destruction.
                 */
                zfs_unlinked_add(zp, tx);
                mutex_exit(&zp->z_lock);
        } else if (unlinked) {
                mutex_exit(&zp->z_lock);
                zfs_unlinked_add(zp, tx);
        }

        txtype = TX_REMOVE;
        if (flags & FIGNORECASE)
                txtype |= TX_CI;
        zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

        dmu_tx_commit(tx);
out:
        if (realnmp)
                pn_free(realnmp);

        zfs_dirent_unlock(dl);
        zfs_inode_update(dzp);
        zfs_inode_update(zp);

        if (delete_now)
                iput(ip);
        else
                zfs_iput_async(ip);

        if (xzp) {
                zfs_inode_update(xzp);
                zfs_iput_async(ZTOI(xzp));
        }

        if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                zil_commit(zilog, 0);

        ZFS_EXIT(zfsvfs);
        return (error);
}

/*
 * Create a new directory and insert it into dip using the name
 * provided. Return a pointer to the inserted directory.
 *
 *      IN:     dip     - inode of directory to add subdir to.
 *              dirname - name of new directory.
 *              vap     - attributes of new directory.
 *              cr      - credentials of caller.
 *              vsecp   - ACL to be set
 *
 *      OUT:    ipp     - inode of created directory.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * Timestamps:
 *      dip - ctime|mtime updated
 *      ipp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
1938 int
1939 zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1940 cred_t *cr, int flags, vsecattr_t *vsecp)
1941 {
1942 znode_t *zp, *dzp = ITOZ(dip);
1943 zfsvfs_t *zfsvfs = ITOZSB(dip);
1944 zilog_t *zilog;
1945 zfs_dirlock_t *dl;
1946 uint64_t txtype;
1947 dmu_tx_t *tx;
1948 int error;
1949 int zf = ZNEW;
1950 uid_t uid;
1951 gid_t gid = crgetgid(cr);
1952 zfs_acl_ids_t acl_ids;
1953 boolean_t fuid_dirtied;
1954 boolean_t waited = B_FALSE;
1955
1956 ASSERT(S_ISDIR(vap->va_mode));
1957
1958 /*
1959 * If we have an ephemeral id, ACL, or XVATTR then
1960 * make sure file system is at proper version
1961 */
1962
1963 uid = crgetuid(cr);
1964 if (zfsvfs->z_use_fuids == B_FALSE &&
1965 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1966 return (SET_ERROR(EINVAL));
1967
1968 if (dirname == NULL)
1969 return (SET_ERROR(EINVAL));
1970
1971 ZFS_ENTER(zfsvfs);
1972 ZFS_VERIFY_ZP(dzp);
1973 zilog = zfsvfs->z_log;
1974
1975 if (dzp->z_pflags & ZFS_XATTR) {
1976 ZFS_EXIT(zfsvfs);
1977 return (SET_ERROR(EINVAL));
1978 }
1979
1980 if (zfsvfs->z_utf8 && u8_validate(dirname,
1981 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1982 ZFS_EXIT(zfsvfs);
1983 return (SET_ERROR(EILSEQ));
1984 }
1985 if (flags & FIGNORECASE)
1986 zf |= ZCILOOK;
1987
1988 if (vap->va_mask & ATTR_XVATTR) {
1989 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1990 crgetuid(cr), cr, vap->va_mode)) != 0) {
1991 ZFS_EXIT(zfsvfs);
1992 return (error);
1993 }
1994 }
1995
1996 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1997 vsecp, &acl_ids)) != 0) {
1998 ZFS_EXIT(zfsvfs);
1999 return (error);
2000 }
2001 /*
2002 * First make sure the new directory doesn't exist.
2003 *
2004 * Existence is checked first to make sure we don't return
2005 * EACCES instead of EEXIST which can cause some applications
2006 * to fail.
2007 */
2008 top:
2009 *ipp = NULL;
2010
2011 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
2012 NULL, NULL))) {
2013 zfs_acl_ids_free(&acl_ids);
2014 ZFS_EXIT(zfsvfs);
2015 return (error);
2016 }
2017
2018 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
2019 zfs_acl_ids_free(&acl_ids);
2020 zfs_dirent_unlock(dl);
2021 ZFS_EXIT(zfsvfs);
2022 return (error);
2023 }
2024
2025 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
2026 zfs_acl_ids_free(&acl_ids);
2027 zfs_dirent_unlock(dl);
2028 ZFS_EXIT(zfsvfs);
2029 return (SET_ERROR(EDQUOT));
2030 }
2031
2032 /*
2033 * Add a new entry to the directory.
2034 */
2035 tx = dmu_tx_create(zfsvfs->z_os);
2036 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2037 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2038 fuid_dirtied = zfsvfs->z_fuid_dirty;
2039 if (fuid_dirtied)
2040 zfs_fuid_txhold(zfsvfs, tx);
2041 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2042 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2043 acl_ids.z_aclp->z_acl_bytes);
2044 }
2045
2046 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2047 ZFS_SA_BASE_ATTR_SIZE);
2048
2049 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2050 if (error) {
2051 zfs_dirent_unlock(dl);
2052 if (error == ERESTART) {
2053 waited = B_TRUE;
2054 dmu_tx_wait(tx);
2055 dmu_tx_abort(tx);
2056 goto top;
2057 }
2058 zfs_acl_ids_free(&acl_ids);
2059 dmu_tx_abort(tx);
2060 ZFS_EXIT(zfsvfs);
2061 return (error);
2062 }
2063
2064 /*
2065 * Create new node.
2066 */
2067 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2068
2069 /*
2070 * Now put new name in parent dir.
2071 */
2072 error = zfs_link_create(dl, zp, tx, ZNEW);
2073 if (error != 0) {
2074 zfs_znode_delete(zp, tx);
2075 remove_inode_hash(ZTOI(zp));
2076 goto out;
2077 }
2078
2079 if (fuid_dirtied)
2080 zfs_fuid_sync(zfsvfs, tx);
2081
2082 *ipp = ZTOI(zp);
2083
2084 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2085 if (flags & FIGNORECASE)
2086 txtype |= TX_CI;
2087 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2088 acl_ids.z_fuidp, vap);
2089
2090 out:
2091 zfs_acl_ids_free(&acl_ids);
2092
2093 dmu_tx_commit(tx);
2094
2095 zfs_dirent_unlock(dl);
2096
2097 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2098 zil_commit(zilog, 0);
2099
2100 if (error != 0) {
2101 iput(ZTOI(zp));
2102 } else {
2103 zfs_inode_update(dzp);
2104 zfs_inode_update(zp);
2105 }
2106 ZFS_EXIT(zfsvfs);
2107 return (error);
2108 }
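/*
 * A minimal sketch (illustrative only, not compiled) of the transaction
 * retry idiom used above and throughout this file: the first assignment
 * attempt uses TXG_NOWAIT, and on ERESTART the caller drops its locks,
 * waits out the write throttle, aborts, and retries with TXG_NOTHROTTLE:
 *
 *	boolean_t waited = B_FALSE;
 * top:
 *	tx = dmu_tx_create(os);
 *	... dmu_tx_hold_*(tx, ...);
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks taken above ...
 *		waited = B_TRUE;
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 */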
2109
2110 /*
2111 * Remove a directory subdir entry. If the current working
2112 * directory is the same as the subdir to be removed, the
2113 * remove will fail.
2114 *
2115 * IN: dip - inode of directory to remove from.
2116 * name - name of directory to be removed.
2117 * cwd - inode of current working directory.
2118 * cr - credentials of caller.
2119 * flags - case flags
2120 *
2121 * RETURN: 0 on success, error code on failure.
2122 *
2123 * Timestamps:
2124 * dip - ctime|mtime updated
2125 */
2126 /*ARGSUSED*/
2127 int
2128 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2129 int flags)
2130 {
2131 znode_t *dzp = ITOZ(dip);
2132 znode_t *zp;
2133 struct inode *ip;
2134 zfsvfs_t *zfsvfs = ITOZSB(dip);
2135 zilog_t *zilog;
2136 zfs_dirlock_t *dl;
2137 dmu_tx_t *tx;
2138 int error;
2139 int zflg = ZEXISTS;
2140 boolean_t waited = B_FALSE;
2141
2142 if (name == NULL)
2143 return (SET_ERROR(EINVAL));
2144
2145 ZFS_ENTER(zfsvfs);
2146 ZFS_VERIFY_ZP(dzp);
2147 zilog = zfsvfs->z_log;
2148
2149 if (flags & FIGNORECASE)
2150 zflg |= ZCILOOK;
2151 top:
2152 zp = NULL;
2153
2154 /*
2155 * Attempt to lock directory; fail if entry doesn't exist.
2156 */
2157 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2158 NULL, NULL))) {
2159 ZFS_EXIT(zfsvfs);
2160 return (error);
2161 }
2162
2163 ip = ZTOI(zp);
2164
2165 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
2166 goto out;
2167 }
2168
2169 if (!S_ISDIR(ip->i_mode)) {
2170 error = SET_ERROR(ENOTDIR);
2171 goto out;
2172 }
2173
2174 if (ip == cwd) {
2175 error = SET_ERROR(EINVAL);
2176 goto out;
2177 }
2178
2179 /*
2180 * Grab a lock on the directory to make sure that no one is
2181 * trying to add (or lookup) entries while we are removing it.
2182 */
2183 rw_enter(&zp->z_name_lock, RW_WRITER);
2184
2185 /*
2186 * Grab a lock on the parent pointer to make sure we play well
2187 * with the treewalk and directory rename code.
2188 */
2189 rw_enter(&zp->z_parent_lock, RW_WRITER);
2190
2191 tx = dmu_tx_create(zfsvfs->z_os);
2192 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2193 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2194 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2195 zfs_sa_upgrade_txholds(tx, zp);
2196 zfs_sa_upgrade_txholds(tx, dzp);
2197 dmu_tx_mark_netfree(tx);
2198 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2199 if (error) {
2200 rw_exit(&zp->z_parent_lock);
2201 rw_exit(&zp->z_name_lock);
2202 zfs_dirent_unlock(dl);
2203 if (error == ERESTART) {
2204 waited = B_TRUE;
2205 dmu_tx_wait(tx);
2206 dmu_tx_abort(tx);
2207 iput(ip);
2208 goto top;
2209 }
2210 dmu_tx_abort(tx);
2211 iput(ip);
2212 ZFS_EXIT(zfsvfs);
2213 return (error);
2214 }
2215
2216 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2217
2218 if (error == 0) {
2219 uint64_t txtype = TX_RMDIR;
2220 if (flags & FIGNORECASE)
2221 txtype |= TX_CI;
2222 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2223 }
2224
2225 dmu_tx_commit(tx);
2226
2227 rw_exit(&zp->z_parent_lock);
2228 rw_exit(&zp->z_name_lock);
2229 out:
2230 zfs_dirent_unlock(dl);
2231
2232 zfs_inode_update(dzp);
2233 zfs_inode_update(zp);
2234 iput(ip);
2235
2236 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2237 zil_commit(zilog, 0);
2238
2239 ZFS_EXIT(zfsvfs);
2240 return (error);
2241 }
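/*
 * Note on dmu_tx_mark_netfree() above (summary based on this usage):
 * removing the entry frees more space than the transaction consumes,
 * so marking it net-free lets the assignment succeed even when the
 * pool is nearly full.
 */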
2242
2243 /*
2244 * Read as many directory entries as will fit into the provided
2245 * zpl directory context, starting from the given cursor position.
2246 *
2247 * IN: ip - inode of directory to read.
2248 * ctx - zpl directory context; entries are emitted into it.
2249 *
2250 * OUT: ctx - cursor position (ctx->pos) advanced past the entries read.
2251 *
2252 * RETURN: 0 if success
2253 * error code if failure
2254 *
2255 * Timestamps:
2256 * ip - atime updated
2257 *
2258 * Note that the low 4 bits of the cookie returned by zap are always zero.
2259 * This allows us to use the low range for "special" directory entries:
2260 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2261 * we use the offset 2 for the '.zfs' directory.
2262 */
2263 /* ARGSUSED */
2264 int
2265 zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
2266 {
2267 znode_t *zp = ITOZ(ip);
2268 zfsvfs_t *zfsvfs = ITOZSB(ip);
2269 objset_t *os;
2270 zap_cursor_t zc;
2271 zap_attribute_t zap;
2272 int error;
2273 uint8_t prefetch;
2274 uint8_t type;
2275 int done = 0;
2276 uint64_t parent;
2277 uint64_t offset; /* must be unsigned; checks for < 1 */
2278
2279 ZFS_ENTER(zfsvfs);
2280 ZFS_VERIFY_ZP(zp);
2281
2282 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2283 &parent, sizeof (parent))) != 0)
2284 goto out;
2285
2286 /*
2287 * Quit if the directory has been removed (POSIX)
2288 */
2289 if (zp->z_unlinked)
2290 goto out;
2291
2292 error = 0;
2293 os = zfsvfs->z_os;
2294 offset = ctx->pos;
2295 prefetch = zp->z_zn_prefetch;
2296
2297 /*
2298 * Initialize the iterator cursor.
2299 */
2300 if (offset <= 3) {
2301 /*
2302 * Start iteration from the beginning of the directory.
2303 */
2304 zap_cursor_init(&zc, os, zp->z_id);
2305 } else {
2306 /*
2307 * The offset is a serialized cursor.
2308 */
2309 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2310 }
2311
2312 /*
2313 * Transform to file-system independent format
2314 */
2315 while (!done) {
2316 uint64_t objnum;
2317 /*
2318 * Special case `.', `..', and `.zfs'.
2319 */
2320 if (offset == 0) {
2321 (void) strcpy(zap.za_name, ".");
2322 zap.za_normalization_conflict = 0;
2323 objnum = zp->z_id;
2324 type = DT_DIR;
2325 } else if (offset == 1) {
2326 (void) strcpy(zap.za_name, "..");
2327 zap.za_normalization_conflict = 0;
2328 objnum = parent;
2329 type = DT_DIR;
2330 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2331 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2332 zap.za_normalization_conflict = 0;
2333 objnum = ZFSCTL_INO_ROOT;
2334 type = DT_DIR;
2335 } else {
2336 /*
2337 * Grab next entry.
2338 */
2339 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2340 if (error == ENOENT)
2341 break;
2342 else
2343 goto update;
2344 }
2345
2346 /*
2347 * Allow multiple entries provided the first entry is
2348 * the object id. Non-zpl consumers may safely make
2349 * use of the additional space.
2350 *
2351 * XXX: This should be a feature flag for compatibility
2352 */
2353 if (zap.za_integer_length != 8 ||
2354 zap.za_num_integers == 0) {
2355 cmn_err(CE_WARN, "zap_readdir: bad directory "
2356 "entry, obj = %lld, offset = %lld, "
2357 "length = %d, num = %lld\n",
2358 (u_longlong_t)zp->z_id,
2359 (u_longlong_t)offset,
2360 zap.za_integer_length,
2361 (u_longlong_t)zap.za_num_integers);
2362 error = SET_ERROR(ENXIO);
2363 goto update;
2364 }
2365
2366 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2367 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2368 }
2369
2370 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
2371 objnum, type);
2372 if (done)
2373 break;
2374
2375 /* Prefetch znode */
2376 if (prefetch) {
2377 dmu_prefetch(os, objnum, 0, 0, 0,
2378 ZIO_PRIORITY_SYNC_READ);
2379 }
2380
2381 /*
2382 * Move to the next entry, fill in the previous offset.
2383 */
2384 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2385 zap_cursor_advance(&zc);
2386 offset = zap_cursor_serialize(&zc);
2387 } else {
2388 offset += 1;
2389 }
2390 ctx->pos = offset;
2391 }
2392 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2393
2394 update:
2395 zap_cursor_fini(&zc);
2396 if (error == ENOENT)
2397 error = 0;
2398 out:
2399 ZFS_EXIT(zfsvfs);
2400
2401 return (error);
2402 }
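/*
 * Worked example of the cookie scheme described above, for a directory
 * at the root of the filesystem with '.zfs' visible (illustrative only):
 *
 *	ctx->pos == 0	emits "."	objnum = zp->z_id
 *	ctx->pos == 1	emits ".."	objnum = parent
 *	ctx->pos == 2	emits ".zfs"	objnum = ZFSCTL_INO_ROOT
 *	larger values	serialized ZAP cursor positions; their low bits
 *			are zero, so they never collide with the special
 *			entries above.
 */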
2403
2404 ulong_t zfs_fsync_sync_cnt = 4;
2405
2406 int
2407 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2408 {
2409 znode_t *zp = ITOZ(ip);
2410 zfsvfs_t *zfsvfs = ITOZSB(ip);
2411
2412 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2413
2414 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2415 ZFS_ENTER(zfsvfs);
2416 ZFS_VERIFY_ZP(zp);
2417 zil_commit(zfsvfs->z_log, zp->z_id);
2418 ZFS_EXIT(zfsvfs);
2419 }
2420 tsd_set(zfs_fsyncer_key, NULL);
2421
2422 return (0);
2423 }
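/*
 * Illustrative summary: unless the dataset has sync=disabled, fsync
 * forces the object's intent-log records to stable storage with
 * zil_commit(zfsvfs->z_log, zp->z_id) and returns once they are durable.
 */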
2424
2425
2426 /*
2427 * Get the requested file attributes and place them in the provided
2428 * vattr structure.
2429 *
2430 * IN: ip - inode of file.
2431 * vap - va_mask identifies requested attributes.
2432 * If ATTR_XVATTR set, then optional attrs are requested
2433 * flags - ATTR_NOACLCHECK (CIFS server context)
2434 * cr - credentials of caller.
2435 *
2436 * OUT: vap - attribute values.
2437 *
2438 * RETURN: 0 (always succeeds)
2439 */
2440 /* ARGSUSED */
2441 int
2442 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2443 {
2444 znode_t *zp = ITOZ(ip);
2445 zfsvfs_t *zfsvfs = ITOZSB(ip);
2446 int error = 0;
2447 uint64_t links;
2448 uint64_t atime[2], mtime[2], ctime[2];
2449 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2450 xoptattr_t *xoap = NULL;
2451 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2452 sa_bulk_attr_t bulk[3];
2453 int count = 0;
2454
2455 ZFS_ENTER(zfsvfs);
2456 ZFS_VERIFY_ZP(zp);
2457
2458 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2459
2460 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2461 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2462 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2463
2464 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2465 ZFS_EXIT(zfsvfs);
2466 return (error);
2467 }
2468
2469 /*
2470 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2471 * Also, if we are the owner don't bother, since owner should
2472 * always be allowed to read basic attributes of file.
2473 */
2474 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2475 (vap->va_uid != crgetuid(cr))) {
2476 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2477 skipaclchk, cr))) {
2478 ZFS_EXIT(zfsvfs);
2479 return (error);
2480 }
2481 }
2482
2483 /*
2484 * Return all attributes. It's cheaper to provide the answer
2485 * than to determine whether we were asked the question.
2486 */
2487
2488 mutex_enter(&zp->z_lock);
2489 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2490 vap->va_mode = zp->z_mode;
2491 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2492 vap->va_nodeid = zp->z_id;
2493 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2494 links = ZTOI(zp)->i_nlink + 1;
2495 else
2496 links = ZTOI(zp)->i_nlink;
2497 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2498 vap->va_size = i_size_read(ip);
2499 vap->va_rdev = ip->i_rdev;
2500 vap->va_seq = ip->i_generation;
2501
2502 /*
2503 * Add in any requested optional attributes and the create time.
2504 * Also set the corresponding bits in the returned attribute bitmap.
2505 */
2506 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2507 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2508 xoap->xoa_archive =
2509 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2510 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2511 }
2512
2513 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2514 xoap->xoa_readonly =
2515 ((zp->z_pflags & ZFS_READONLY) != 0);
2516 XVA_SET_RTN(xvap, XAT_READONLY);
2517 }
2518
2519 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2520 xoap->xoa_system =
2521 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2522 XVA_SET_RTN(xvap, XAT_SYSTEM);
2523 }
2524
2525 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2526 xoap->xoa_hidden =
2527 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2528 XVA_SET_RTN(xvap, XAT_HIDDEN);
2529 }
2530
2531 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2532 xoap->xoa_nounlink =
2533 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2534 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2535 }
2536
2537 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2538 xoap->xoa_immutable =
2539 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2540 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2541 }
2542
2543 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2544 xoap->xoa_appendonly =
2545 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2546 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2547 }
2548
2549 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2550 xoap->xoa_nodump =
2551 ((zp->z_pflags & ZFS_NODUMP) != 0);
2552 XVA_SET_RTN(xvap, XAT_NODUMP);
2553 }
2554
2555 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2556 xoap->xoa_opaque =
2557 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2558 XVA_SET_RTN(xvap, XAT_OPAQUE);
2559 }
2560
2561 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2562 xoap->xoa_av_quarantined =
2563 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2564 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2565 }
2566
2567 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2568 xoap->xoa_av_modified =
2569 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2570 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2571 }
2572
2573 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2574 S_ISREG(ip->i_mode)) {
2575 zfs_sa_get_scanstamp(zp, xvap);
2576 }
2577
2578 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2579 uint64_t times[2];
2580
2581 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2582 times, sizeof (times));
2583 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2584 XVA_SET_RTN(xvap, XAT_CREATETIME);
2585 }
2586
2587 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2588 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2589 XVA_SET_RTN(xvap, XAT_REPARSE);
2590 }
2591 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2592 xoap->xoa_generation = ip->i_generation;
2593 XVA_SET_RTN(xvap, XAT_GEN);
2594 }
2595
2596 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2597 xoap->xoa_offline =
2598 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2599 XVA_SET_RTN(xvap, XAT_OFFLINE);
2600 }
2601
2602 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2603 xoap->xoa_sparse =
2604 ((zp->z_pflags & ZFS_SPARSE) != 0);
2605 XVA_SET_RTN(xvap, XAT_SPARSE);
2606 }
2607
2608 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2609 xoap->xoa_projinherit =
2610 ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
2611 XVA_SET_RTN(xvap, XAT_PROJINHERIT);
2612 }
2613
2614 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2615 xoap->xoa_projid = zp->z_projid;
2616 XVA_SET_RTN(xvap, XAT_PROJID);
2617 }
2618 }
2619
2620 ZFS_TIME_DECODE(&vap->va_atime, atime);
2621 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2622 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2623
2624 mutex_exit(&zp->z_lock);
2625
2626 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2627
2628 if (zp->z_blksz == 0) {
2629 /*
2630 * Block size hasn't been set; suggest maximal I/O transfers.
2631 */
2632 vap->va_blksize = zfsvfs->z_max_blksz;
2633 }
2634
2635 ZFS_EXIT(zfsvfs);
2636 return (0);
2637 }
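/*
 * A minimal sketch (illustrative only) of the SA bulk-lookup pattern
 * used above to fetch several attributes in a single call:
 *
 *	sa_bulk_attr_t bulk[3];
 *	int count = 0;
 *	uint64_t atime[2], mtime[2], ctime[2];
 *
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
 *	error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count);
 */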
2638
2639 /*
2640 * Get the basic file attributes and place them in the provided kstat
2641 * structure. The inode is assumed to be the authoritative source
2642 * for most of the attributes. However, the znode currently has the
2643 * authoritative atime, blksize, and block count.
2644 *
2645 * IN: ip - inode of file.
2646 *
2647 * OUT: sp - kstat values.
2648 *
2649 * RETURN: 0 (always succeeds)
2650 */
2651 /* ARGSUSED */
2652 int
2653 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2654 {
2655 znode_t *zp = ITOZ(ip);
2656 zfsvfs_t *zfsvfs = ITOZSB(ip);
2657 uint32_t blksize;
2658 u_longlong_t nblocks;
2659
2660 ZFS_ENTER(zfsvfs);
2661 ZFS_VERIFY_ZP(zp);
2662
2663 mutex_enter(&zp->z_lock);
2664
2665 generic_fillattr(ip, sp);
2666
2667 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2668 sp->blksize = blksize;
2669 sp->blocks = nblocks;
2670
2671 if (unlikely(zp->z_blksz == 0)) {
2672 /*
2673 * Block size hasn't been set; suggest maximal I/O transfers.
2674 */
2675 sp->blksize = zfsvfs->z_max_blksz;
2676 }
2677
2678 mutex_exit(&zp->z_lock);
2679
2680 /*
2681 * Required to prevent the NFS client from detecting different inode
2682 * numbers for the snapshot root dentry before and after snapshot mount.
2683 */
2684 if (zfsvfs->z_issnap) {
2685 if (ip->i_sb->s_root->d_inode == ip)
2686 sp->ino = ZFSCTL_INO_SNAPDIRS -
2687 dmu_objset_id(zfsvfs->z_os);
2688 }
2689
2690 ZFS_EXIT(zfsvfs);
2691
2692 return (0);
2693 }
2694
2695 /*
2696 * When changing a file's user, group, or project, we must handle not
2697 * only the main object that is assigned to the file directly, but also
2698 * the objects backing its hidden xattr directory.
2699 *
2700 * Because the xattr directory may contain many EA entries, it may be
2701 * impossible to change all of them in the single transaction that
2702 * changes the main object's user/group/project attributes, so they are
2703 * changed one by one in separate, independent transactions. This is
2704 * not an ideal solution, but no better approach is known yet.
2705 */
2706 static int
2707 zfs_setattr_dir(znode_t *dzp)
2708 {
2709 struct inode *dxip = ZTOI(dzp);
2710 struct inode *xip = NULL;
2711 zfsvfs_t *zfsvfs = ITOZSB(dxip);
2712 objset_t *os = zfsvfs->z_os;
2713 zap_cursor_t zc;
2714 zap_attribute_t zap;
2715 zfs_dirlock_t *dl;
2716 znode_t *zp;
2717 dmu_tx_t *tx = NULL;
2718 uint64_t uid, gid;
2719 sa_bulk_attr_t bulk[4];
2720 int count = 0;
2721 int err;
2722
2723 zap_cursor_init(&zc, os, dzp->z_id);
2724 while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
2725 if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
2726 err = ENXIO;
2727 break;
2728 }
2729
2730 err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
2731 ZEXISTS, NULL, NULL);
2732 if (err == ENOENT)
2733 goto next;
2734 if (err)
2735 break;
2736
2737 xip = ZTOI(zp);
2738 if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
2739 KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
2740 zp->z_projid == dzp->z_projid)
2741 goto next;
2742
2743 tx = dmu_tx_create(os);
2744 if (!(zp->z_pflags & ZFS_PROJID))
2745 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2746 else
2747 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2748
2749 err = dmu_tx_assign(tx, TXG_WAIT);
2750 if (err)
2751 break;
2752
2753 mutex_enter(&dzp->z_lock);
2754
2755 if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
2756 xip->i_uid = dxip->i_uid;
2757 uid = zfs_uid_read(dxip);
2758 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2759 &uid, sizeof (uid));
2760 }
2761
2762 if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
2763 xip->i_gid = dxip->i_gid;
2764 gid = zfs_gid_read(dxip);
2765 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
2766 &gid, sizeof (gid));
2767 }
2768
2769 if (zp->z_projid != dzp->z_projid) {
2770 if (!(zp->z_pflags & ZFS_PROJID)) {
2771 zp->z_pflags |= ZFS_PROJID;
2772 SA_ADD_BULK_ATTR(bulk, count,
2773 SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
2774 sizeof (zp->z_pflags));
2775 }
2776
2777 zp->z_projid = dzp->z_projid;
2778 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
2779 NULL, &zp->z_projid, sizeof (zp->z_projid));
2780 }
2781
2782 mutex_exit(&dzp->z_lock);
2783
2784 if (likely(count > 0)) {
2785 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2786 dmu_tx_commit(tx);
2787 } else {
2788 dmu_tx_abort(tx);
2789 }
2790 tx = NULL;
2791 if (err != 0 && err != ENOENT)
2792 break;
2793
2794 next:
2795 if (xip) {
2796 iput(xip);
2797 xip = NULL;
2798 zfs_dirent_unlock(dl);
2799 }
2800 zap_cursor_advance(&zc);
2801 }
2802
2803 if (tx)
2804 dmu_tx_abort(tx);
2805 if (xip) {
2806 iput(xip);
2807 zfs_dirent_unlock(dl);
2808 }
2809 zap_cursor_fini(&zc);
2810
2811 return (err == ENOENT ? 0 : err);
2812 }
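/*
 * Sketch (illustrative only) of the ZAP cursor idiom used above to walk
 * the xattr directory's entries:
 *
 *	zap_cursor_init(&zc, os, dzp->z_id);
 *	while (zap_cursor_retrieve(&zc, &zap) == 0) {
 *		... look up zap.za_name and update that child ...
 *		zap_cursor_advance(&zc);
 *	}
 *	zap_cursor_fini(&zc);
 */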
2813
2814 /*
2815 * Set the file attributes to the values contained in the
2816 * vattr structure.
2817 *
2818 * IN: ip - inode of file to be modified.
2819 * vap - new attribute values.
2820 * If ATTR_XVATTR set, then optional attrs are being set
2821 * flags - ATTR_UTIME set if non-default time values provided.
2822 * - ATTR_NOACLCHECK (CIFS context only).
2823 * cr - credentials of caller.
2824 *
2825 * RETURN: 0 if success
2826 * error code if failure
2827 *
2828 * Timestamps:
2829 * ip - ctime updated, mtime updated if size changed.
2830 */
2831 /* ARGSUSED */
2832 int
2833 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2834 {
2835 znode_t *zp = ITOZ(ip);
2836 zfsvfs_t *zfsvfs = ITOZSB(ip);
2837 objset_t *os = zfsvfs->z_os;
2838 zilog_t *zilog;
2839 dmu_tx_t *tx;
2840 vattr_t oldva;
2841 xvattr_t *tmpxvattr;
2842 uint_t mask = vap->va_mask;
2843 uint_t saved_mask = 0;
2844 int trim_mask = 0;
2845 uint64_t new_mode;
2846 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
2847 uint64_t xattr_obj;
2848 uint64_t mtime[2], ctime[2], atime[2];
2849 uint64_t projid = ZFS_INVALID_PROJID;
2850 znode_t *attrzp;
2851 int need_policy = FALSE;
2852 int err, err2 = 0;
2853 zfs_fuid_info_t *fuidp = NULL;
2854 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2855 xoptattr_t *xoap;
2856 zfs_acl_t *aclp;
2857 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2858 boolean_t fuid_dirtied = B_FALSE;
2859 boolean_t handle_eadir = B_FALSE;
2860 sa_bulk_attr_t *bulk, *xattr_bulk;
2861 int count = 0, xattr_count = 0, bulks = 8;
2862
2863 if (mask == 0)
2864 return (0);
2865
2866 ZFS_ENTER(zfsvfs);
2867 ZFS_VERIFY_ZP(zp);
2868
2869 /*
2870 * If this is an xvattr_t, then get a pointer to the structure of
2871 * optional attributes. If this is NULL, then we have a vattr_t.
2872 */
2873 xoap = xva_getxoptattr(xvap);
2874 if (xoap != NULL && (mask & ATTR_XVATTR)) {
2875 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2876 if (!dmu_objset_projectquota_enabled(os) ||
2877 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
2878 ZFS_EXIT(zfsvfs);
2879 return (SET_ERROR(ENOTSUP));
2880 }
2881
2882 projid = xoap->xoa_projid;
2883 if (unlikely(projid == ZFS_INVALID_PROJID)) {
2884 ZFS_EXIT(zfsvfs);
2885 return (SET_ERROR(EINVAL));
2886 }
2887
2888 if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
2889 projid = ZFS_INVALID_PROJID;
2890 else
2891 need_policy = TRUE;
2892 }
2893
2894 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
2895 (xoap->xoa_projinherit !=
2896 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
2897 (!dmu_objset_projectquota_enabled(os) ||
2898 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
2899 ZFS_EXIT(zfsvfs);
2900 return (SET_ERROR(ENOTSUP));
2901 }
2902 }
2903
2904 zilog = zfsvfs->z_log;
2905
2906 /*
2907 * If an ephemeral uid/gid or an xvattr is specified, make sure the
2908 * file system is at the proper version level.
2909 */
2910
2911 if (zfsvfs->z_use_fuids == B_FALSE &&
2912 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2913 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2914 (mask & ATTR_XVATTR))) {
2915 ZFS_EXIT(zfsvfs);
2916 return (SET_ERROR(EINVAL));
2917 }
2918
2919 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2920 ZFS_EXIT(zfsvfs);
2921 return (SET_ERROR(EISDIR));
2922 }
2923
2924 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2925 ZFS_EXIT(zfsvfs);
2926 return (SET_ERROR(EINVAL));
2927 }
2928
2929 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2930 xva_init(tmpxvattr);
2931
2932 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2933 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2934
2935 /*
2936 * Immutable files may only have their immutable bit and atime altered.
2937 */
2938 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2939 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2940 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2941 err = SET_ERROR(EPERM);
2942 goto out3;
2943 }
2944
2945 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2946 err = SET_ERROR(EPERM);
2947 goto out3;
2948 }
2949
2950 /*
2951 * Verify that the timestamps do not overflow 32 bits.
2952 * ZFS can handle large timestamps, but 32-bit syscalls can't
2953 * handle times greater than 2039. This check should be removed
2954 * once large timestamps are fully supported.
2955 */
2956 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2957 if (((mask & ATTR_ATIME) &&
2958 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2959 ((mask & ATTR_MTIME) &&
2960 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2961 err = SET_ERROR(EOVERFLOW);
2962 goto out3;
2963 }
2964 }
2965
2966 top:
2967 attrzp = NULL;
2968 aclp = NULL;
2969
2970 /* Can this be moved to before the top label? */
2971 if (zfs_is_readonly(zfsvfs)) {
2972 err = SET_ERROR(EROFS);
2973 goto out3;
2974 }
2975
2976 /*
2977 * First validate permissions
2978 */
2979
2980 if (mask & ATTR_SIZE) {
2981 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2982 if (err)
2983 goto out3;
2984
2985 /*
2986 * XXX - Note, we are not providing any open
2987 * mode flags here (like FNDELAY), so we may
2988 * block if there are locks present... this
2989 * should be addressed in openat().
2990 */
2991 /* XXX - would it be OK to generate a log record here? */
2992 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2993 if (err)
2994 goto out3;
2995 }
2996
2997 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2998 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2999 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
3000 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
3001 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
3002 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
3003 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
3004 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
3005 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3006 skipaclchk, cr);
3007 }
3008
3009 if (mask & (ATTR_UID|ATTR_GID)) {
3010 int idmask = (mask & (ATTR_UID|ATTR_GID));
3011 int take_owner;
3012 int take_group;
3013
3014 /*
3015 * NOTE: even if a new mode is being set,
3016 * we may clear S_ISUID/S_ISGID bits.
3017 */
3018
3019 if (!(mask & ATTR_MODE))
3020 vap->va_mode = zp->z_mode;
3021
3022 /*
3023 * Take ownership or chgrp to group we are a member of
3024 */
3025
3026 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
3027 take_group = (mask & ATTR_GID) &&
3028 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3029
3030 /*
3031 * If both ATTR_UID and ATTR_GID are set then take_owner and
3032 * take_group must both be set in order to allow taking
3033 * ownership.
3034 *
3035 * Otherwise, send the check through secpolicy_vnode_setattr()
3036 *
3037 */
3038
3039 if (((idmask == (ATTR_UID|ATTR_GID)) &&
3040 take_owner && take_group) ||
3041 ((idmask == ATTR_UID) && take_owner) ||
3042 ((idmask == ATTR_GID) && take_group)) {
3043 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3044 skipaclchk, cr) == 0) {
3045 /*
3046 * Remove setuid/setgid for non-privileged users
3047 */
3048 (void) secpolicy_setid_clear(vap, cr);
3049 trim_mask = (mask & (ATTR_UID|ATTR_GID));
3050 } else {
3051 need_policy = TRUE;
3052 }
3053 } else {
3054 need_policy = TRUE;
3055 }
3056 }
3057
3058 mutex_enter(&zp->z_lock);
3059 oldva.va_mode = zp->z_mode;
3060 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3061 if (mask & ATTR_XVATTR) {
3062 /*
3063 * Update xvattr mask to include only those attributes
3064 * that are actually changing.
3065 *
3066 * The bits will be restored prior to actually setting
3067 * the attributes so the caller thinks they were set.
3068 */
3069 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3070 if (xoap->xoa_appendonly !=
3071 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3072 need_policy = TRUE;
3073 } else {
3074 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3075 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
3076 }
3077 }
3078
3079 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
3080 if (xoap->xoa_projinherit !=
3081 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
3082 need_policy = TRUE;
3083 } else {
3084 XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
3085 XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
3086 }
3087 }
3088
3089 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3090 if (xoap->xoa_nounlink !=
3091 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3092 need_policy = TRUE;
3093 } else {
3094 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3095 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
3096 }
3097 }
3098
3099 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3100 if (xoap->xoa_immutable !=
3101 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3102 need_policy = TRUE;
3103 } else {
3104 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3105 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
3106 }
3107 }
3108
3109 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3110 if (xoap->xoa_nodump !=
3111 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3112 need_policy = TRUE;
3113 } else {
3114 XVA_CLR_REQ(xvap, XAT_NODUMP);
3115 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
3116 }
3117 }
3118
3119 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3120 if (xoap->xoa_av_modified !=
3121 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3122 need_policy = TRUE;
3123 } else {
3124 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3125 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
3126 }
3127 }
3128
3129 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3130 if ((!S_ISREG(ip->i_mode) &&
3131 xoap->xoa_av_quarantined) ||
3132 xoap->xoa_av_quarantined !=
3133 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3134 need_policy = TRUE;
3135 } else {
3136 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3137 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
3138 }
3139 }
3140
3141 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3142 mutex_exit(&zp->z_lock);
3143 err = SET_ERROR(EPERM);
3144 goto out3;
3145 }
3146
3147 if (need_policy == FALSE &&
3148 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3149 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3150 need_policy = TRUE;
3151 }
3152 }
3153
3154 mutex_exit(&zp->z_lock);
3155
3156 if (mask & ATTR_MODE) {
3157 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3158 err = secpolicy_setid_setsticky_clear(ip, vap,
3159 &oldva, cr);
3160 if (err)
3161 goto out3;
3162
3163 trim_mask |= ATTR_MODE;
3164 } else {
3165 need_policy = TRUE;
3166 }
3167 }
3168
3169 if (need_policy) {
3170 /*
3171 * If trim_mask is set then take-ownership has been granted, or
3172 * write_acl is present and the user has the ability to modify the
3173 * mode. In that case remove ATTR_UID|ATTR_GID and/or ATTR_MODE
3174 * from the mask so that secpolicy_vnode_setattr() doesn't
3175 * revoke it.
3176 */
3177
3178 if (trim_mask) {
3179 saved_mask = vap->va_mask;
3180 vap->va_mask &= ~trim_mask;
3181 }
3182 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
3183 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3184 if (err)
3185 goto out3;
3186
3187 if (trim_mask)
3188 vap->va_mask |= saved_mask;
3189 }
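/*
 * Worked example (illustrative): a chgrp by the file's owner to a
 * group the caller belongs to sets trim_mask = ATTR_GID above. If
 * another attribute still requires the policy check, ATTR_GID is
 * removed from va_mask before secpolicy_vnode_setattr() so that call
 * cannot revoke the already-granted change; va_mask is restored
 * afterwards.
 */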
3190
3191 /*
3192 * secpolicy_vnode_setattr, or take ownership may have
3193 * changed va_mask
3194 */
3195 mask = vap->va_mask;
3196
3197 if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
3198 handle_eadir = B_TRUE;
3199 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3200 &xattr_obj, sizeof (xattr_obj));
3201
3202 if (err == 0 && xattr_obj) {
3203 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
3204 if (err)
3205 goto out2;
3206 }
3207 if (mask & ATTR_UID) {
3208 new_kuid = zfs_fuid_create(zfsvfs,
3209 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3210 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
3211 zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
3212 new_kuid)) {
3213 if (attrzp)
3214 iput(ZTOI(attrzp));
3215 err = SET_ERROR(EDQUOT);
3216 goto out2;
3217 }
3218 }
3219
3220 if (mask & ATTR_GID) {
3221 new_kgid = zfs_fuid_create(zfsvfs,
3222 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
3223 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
3224 zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
3225 new_kgid)) {
3226 if (attrzp)
3227 iput(ZTOI(attrzp));
3228 err = SET_ERROR(EDQUOT);
3229 goto out2;
3230 }
3231 }
3232
3233 if (projid != ZFS_INVALID_PROJID &&
3234 zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
3235 if (attrzp)
3236 iput(ZTOI(attrzp));
3237 err = EDQUOT;
3238 goto out2;
3239 }
3240 }
3241 tx = dmu_tx_create(os);
3242
3243 if (mask & ATTR_MODE) {
3244 uint64_t pmode = zp->z_mode;
3245 uint64_t acl_obj;
3246 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3247
3248 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
3249
3250 mutex_enter(&zp->z_lock);
3251 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3252 /*
3253 * Are we upgrading ACL from old V0 format
3254 * to V1 format?
3255 */
3256 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3257 zfs_znode_acl_version(zp) ==
3258 ZFS_ACL_VERSION_INITIAL) {
3259 dmu_tx_hold_free(tx, acl_obj, 0,
3260 DMU_OBJECT_END);
3261 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3262 0, aclp->z_acl_bytes);
3263 } else {
3264 dmu_tx_hold_write(tx, acl_obj, 0,
3265 aclp->z_acl_bytes);
3266 }
3267 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3268 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3269 0, aclp->z_acl_bytes);
3270 }
3271 mutex_exit(&zp->z_lock);
3272 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3273 } else {
3274 if (((mask & ATTR_XVATTR) &&
3275 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
3276 (projid != ZFS_INVALID_PROJID &&
3277 !(zp->z_pflags & ZFS_PROJID)))
3278 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3279 else
3280 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3281 }
3282
3283 if (attrzp) {
3284 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3285 }
3286
3287 fuid_dirtied = zfsvfs->z_fuid_dirty;
3288 if (fuid_dirtied)
3289 zfs_fuid_txhold(zfsvfs, tx);
3290
3291 zfs_sa_upgrade_txholds(tx, zp);
3292
3293 err = dmu_tx_assign(tx, TXG_WAIT);
3294 if (err)
3295 goto out;
3296
3297 count = 0;
3298 /*
3299 * Set each attribute requested.
3300 * We group settings according to the locks they need to acquire.
3301 *
3302 * Note: you cannot set ctime directly, although it will be
3303 * updated as a side-effect of calling this function.
3304 */
3305
3306 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
3307 /*
3308 * An existing object upgraded from an old system has no slot in
3309 * its on-disk layout for the project ID attribute, but the quota
3310 * accounting logic needs to access the related slots directly by
3311 * offset. Adjust the layout of such old objects so the project
3312 * ID lands at a unified, fixed offset.
3313 */
3314 if (attrzp)
3315 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
3316 if (err == 0)
3317 err = sa_add_projid(zp->z_sa_hdl, tx, projid);
3318
3319 if (unlikely(err == EEXIST))
3320 err = 0;
3321 else if (err != 0)
3322 goto out;
3323 else
3324 projid = ZFS_INVALID_PROJID;
3325 }
3326
3327 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3328 mutex_enter(&zp->z_acl_lock);
3329 mutex_enter(&zp->z_lock);
3330
3331 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3332 &zp->z_pflags, sizeof (zp->z_pflags));
3333
3334 if (attrzp) {
3335 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3336 mutex_enter(&attrzp->z_acl_lock);
3337 mutex_enter(&attrzp->z_lock);
3338 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3339 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3340 sizeof (attrzp->z_pflags));
3341 if (projid != ZFS_INVALID_PROJID) {
3342 attrzp->z_projid = projid;
3343 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3344 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
3345 sizeof (attrzp->z_projid));
3346 }
3347 }
3348
3349 if (mask & (ATTR_UID|ATTR_GID)) {
3350
3351 if (mask & ATTR_UID) {
3352 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3353 new_uid = zfs_uid_read(ZTOI(zp));
3354 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3355 &new_uid, sizeof (new_uid));
3356 if (attrzp) {
3357 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3358 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3359 sizeof (new_uid));
3360 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
3361 }
3362 }
3363
3364 if (mask & ATTR_GID) {
3365 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3366 new_gid = zfs_gid_read(ZTOI(zp));
3367 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3368 NULL, &new_gid, sizeof (new_gid));
3369 if (attrzp) {
3370 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3371 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3372 sizeof (new_gid));
3373 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
3374 }
3375 }
3376 if (!(mask & ATTR_MODE)) {
3377 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3378 NULL, &new_mode, sizeof (new_mode));
3379 new_mode = zp->z_mode;
3380 }
3381 err = zfs_acl_chown_setattr(zp);
3382 ASSERT(err == 0);
3383 if (attrzp) {
3384 err = zfs_acl_chown_setattr(attrzp);
3385 ASSERT(err == 0);
3386 }
3387 }
3388
3389 if (mask & ATTR_MODE) {
3390 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3391 &new_mode, sizeof (new_mode));
3392 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
3393 ASSERT3P(aclp, !=, NULL);
3394 err = zfs_aclset_common(zp, aclp, cr, tx);
3395 ASSERT0(err);
3396 if (zp->z_acl_cached)
3397 zfs_acl_free(zp->z_acl_cached);
3398 zp->z_acl_cached = aclp;
3399 aclp = NULL;
3400 }
3401
3402 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3403 zp->z_atime_dirty = 0;
3404 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3405 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3406 &atime, sizeof (atime));
3407 }
3408
3409 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
3410 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3411 ZTOI(zp)->i_mtime = timespec_trunc(vap->va_mtime,
3412 ZTOI(zp)->i_sb->s_time_gran);
3413
3414 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3415 mtime, sizeof (mtime));
3416 }
3417
3418 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
3419 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
3420 ZTOI(zp)->i_ctime = timespec_trunc(vap->va_ctime,
3421 ZTOI(zp)->i_sb->s_time_gran);
3422 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3423 ctime, sizeof (ctime));
3424 }
3425
3426 if (projid != ZFS_INVALID_PROJID) {
3427 zp->z_projid = projid;
3428 SA_ADD_BULK_ATTR(bulk, count,
3429 SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
3430 sizeof (zp->z_projid));
3431 }
3432
3433 if (attrzp && mask) {
3434 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3435 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
3436 sizeof (ctime));
3437 }
3438
3439 /*
3440 * Do this after setting timestamps to prevent timestamp
3441 * update from toggling the bit.
3442 */
3443
3444 if (xoap && (mask & ATTR_XVATTR)) {
3445
3446 /*
3447 * Restore the trimmed-off masks
3448 * so that return masks can be set for the caller.
3449 */
3450
3451 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
3452 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3453 }
3454 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
3455 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3456 }
3457 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
3458 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3459 }
3460 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
3461 XVA_SET_REQ(xvap, XAT_NODUMP);
3462 }
3463 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
3464 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3465 }
3466 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
3467 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3468 }
3469 if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
3470 XVA_SET_REQ(xvap, XAT_PROJINHERIT);
3471 }
3472
3473 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3474 ASSERT(S_ISREG(ip->i_mode));
3475
3476 zfs_xvattr_set(zp, xvap, tx);
3477 }
3478
3479 if (fuid_dirtied)
3480 zfs_fuid_sync(zfsvfs, tx);
3481
3482 if (mask != 0)
3483 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3484
3485 mutex_exit(&zp->z_lock);
3486 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3487 mutex_exit(&zp->z_acl_lock);
3488
3489 if (attrzp) {
3490 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3491 mutex_exit(&attrzp->z_acl_lock);
3492 mutex_exit(&attrzp->z_lock);
3493 }
3494 out:
3495 if (err == 0 && xattr_count > 0) {
3496 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3497 xattr_count, tx);
3498 ASSERT(err2 == 0);
3499 }
3500
3501 if (aclp)
3502 zfs_acl_free(aclp);
3503
3504 if (fuidp) {
3505 zfs_fuid_info_free(fuidp);
3506 fuidp = NULL;
3507 }
3508
3509 if (err) {
3510 dmu_tx_abort(tx);
3511 if (attrzp)
3512 iput(ZTOI(attrzp));
3513 if (err == ERESTART)
3514 goto top;
3515 } else {
3516 if (count > 0)
3517 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3518 dmu_tx_commit(tx);
3519 if (attrzp) {
3520 if (err2 == 0 && handle_eadir)
3521 err2 = zfs_setattr_dir(attrzp);
3522 iput(ZTOI(attrzp));
3523 }
3524 zfs_inode_update(zp);
3525 }
3526
3527 out2:
3528 if (os->os_sync == ZFS_SYNC_ALWAYS)
3529 zil_commit(zilog, 0);
3530
3531 out3:
3532 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
3533 kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
3534 kmem_free(tmpxvattr, sizeof (xvattr_t));
3535 ZFS_EXIT(zfsvfs);
3536 return (err);
3537 }
3538
3539 typedef struct zfs_zlock {
3540 krwlock_t *zl_rwlock; /* lock we acquired */
3541 znode_t *zl_znode; /* znode we held */
3542 struct zfs_zlock *zl_next; /* next in list */
3543 } zfs_zlock_t;
3544
3545 /*
3546 * Drop locks and release vnodes that were held by zfs_rename_lock().
3547 */
3548 static void
3549 zfs_rename_unlock(zfs_zlock_t **zlpp)
3550 {
3551 zfs_zlock_t *zl;
3552
3553 while ((zl = *zlpp) != NULL) {
3554 if (zl->zl_znode != NULL)
3555 zfs_iput_async(ZTOI(zl->zl_znode));
3556 rw_exit(zl->zl_rwlock);
3557 *zlpp = zl->zl_next;
3558 kmem_free(zl, sizeof (*zl));
3559 }
3560 }
3561
3562 /*
3563 * Search back through the directory tree, using the ".." entries.
3564 * Lock each directory in the chain to prevent concurrent renames.
3565 * Fail any attempt to move a directory into one of its own descendants.
3566 * XXX - z_parent_lock can overlap with map or grow locks
3567 */
3568 static int
3569 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3570 {
3571 zfs_zlock_t *zl;
3572 znode_t *zp = tdzp;
3573 uint64_t rootid = ZTOZSB(zp)->z_root;
3574 uint64_t oidp = zp->z_id;
3575 krwlock_t *rwlp = &szp->z_parent_lock;
3576 krw_t rw = RW_WRITER;
3577
3578 /*
3579 * First pass write-locks szp and compares to zp->z_id.
3580 * Later passes read-lock zp and compare to zp->z_parent.
3581 */
3582 do {
3583 if (!rw_tryenter(rwlp, rw)) {
3584 /*
3585 * Another thread is renaming in this path.
3586 * Note that if we are a WRITER, we don't have any
3587 * parent_locks held yet.
3588 */
3589 if (rw == RW_READER && zp->z_id > szp->z_id) {
3590 /*
3591 * Drop our locks and restart
3592 */
3593 zfs_rename_unlock(&zl);
3594 *zlpp = NULL;
3595 zp = tdzp;
3596 oidp = zp->z_id;
3597 rwlp = &szp->z_parent_lock;
3598 rw = RW_WRITER;
3599 continue;
3600 } else {
3601 /*
3602 * Wait for other thread to drop its locks
3603 */
3604 rw_enter(rwlp, rw);
3605 }
3606 }
3607
3608 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3609 zl->zl_rwlock = rwlp;
3610 zl->zl_znode = NULL;
3611 zl->zl_next = *zlpp;
3612 *zlpp = zl;
3613
3614 if (oidp == szp->z_id) /* We're a descendant of szp */
3615 return (SET_ERROR(EINVAL));
3616
3617 if (oidp == rootid) /* We've hit the top */
3618 return (0);
3619
3620 if (rw == RW_READER) { /* i.e. not the first pass */
3621 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3622 if (error)
3623 return (error);
3624 zl->zl_znode = zp;
3625 }
3626 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3627 &oidp, sizeof (oidp));
3628 rwlp = &zp->z_parent_lock;
3629 rw = RW_READER;
3630
3631 } while (zp->z_id != sdzp->z_id);
3632
3633 return (0);
3634 }
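/*
 * Illustrative walk: for an attempted move of /usr/a/b into one of its
 * own descendants, the loop above follows the ".." (SA_ZPL_PARENT)
 * entries upward from the target directory; encountering szp->z_id on
 * the way up means the source is an ancestor of the target, and EINVAL
 * is returned before anything is modified.
 */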
3635
3636 /*
3637 * Move an entry from the provided source directory to the target
3638 * directory. Change the entry name as indicated.
3639 *
3640 * IN: sdip - Source directory containing the "old entry".
3641 * snm - Old entry name.
3642 * tdip - Target directory to contain the "new entry".
3643 * tnm - New entry name.
3644 * cr - credentials of caller.
3645 * flags - case flags
3646 *
3647 * RETURN: 0 on success, error code on failure.
3648 *
3649 * Timestamps:
3650 * sdip,tdip - ctime|mtime updated
3651 */
3652 /*ARGSUSED*/
3653 int
3654 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3655 cred_t *cr, int flags)
3656 {
3657 znode_t *tdzp, *szp, *tzp;
3658 znode_t *sdzp = ITOZ(sdip);
3659 zfsvfs_t *zfsvfs = ITOZSB(sdip);
3660 zilog_t *zilog;
3661 zfs_dirlock_t *sdl, *tdl;
3662 dmu_tx_t *tx;
3663 zfs_zlock_t *zl;
3664 int cmp, serr, terr;
3665 int error = 0;
3666 int zflg = 0;
3667 boolean_t waited = B_FALSE;
3668
3669 if (snm == NULL || tnm == NULL)
3670 return (SET_ERROR(EINVAL));
3671
3672 ZFS_ENTER(zfsvfs);
3673 ZFS_VERIFY_ZP(sdzp);
3674 zilog = zfsvfs->z_log;
3675
3676 tdzp = ITOZ(tdip);
3677 ZFS_VERIFY_ZP(tdzp);
3678
3679 /*
3680 * We check i_sb because snapshots and the ctldir must have different
3681 * super blocks.
3682 */
3683 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3684 ZFS_EXIT(zfsvfs);
3685 return (SET_ERROR(EXDEV));
3686 }
3687
3688 if (zfsvfs->z_utf8 && u8_validate(tnm,
3689 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3690 ZFS_EXIT(zfsvfs);
3691 return (SET_ERROR(EILSEQ));
3692 }
3693
3694 if (flags & FIGNORECASE)
3695 zflg |= ZCILOOK;
3696
3697 top:
3698 szp = NULL;
3699 tzp = NULL;
3700 zl = NULL;
3701
3702 /*
3703 * This is to prevent the creation of links into attribute space
3704 * by renaming a linked file into/out of an attribute directory.
3705 * See the comment in zfs_link() for why this is considered bad.
3706 */
3707 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3708 ZFS_EXIT(zfsvfs);
3709 return (SET_ERROR(EINVAL));
3710 }
3711
3712 /*
3713 * Lock source and target directory entries. To prevent deadlock,
3714 * a lock ordering must be defined. We lock the directory with
3715 * the smallest object id first, or if it's a tie, the one with
3716 * the lexically first name.
3717 */
3718 if (sdzp->z_id < tdzp->z_id) {
3719 cmp = -1;
3720 } else if (sdzp->z_id > tdzp->z_id) {
3721 cmp = 1;
3722 } else {
3723 /*
3724 * First compare the two name arguments without
3725 * considering any case folding.
3726 */
3727 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3728
3729 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3730 ASSERT(error == 0 || !zfsvfs->z_utf8);
3731 if (cmp == 0) {
3732 /*
3733 * POSIX: "If the old argument and the new argument
3734 * both refer to links to the same existing file,
3735 * the rename() function shall return successfully
3736 * and perform no other action."
3737 */
3738 ZFS_EXIT(zfsvfs);
3739 return (0);
3740 }
3741 /*
3742 * If the file system is case-folding, then we may
3743 * have some more checking to do. A case-folding file
3744 * system is either supporting mixed case sensitivity
3745 * access or is completely case-insensitive. Note
3746 * that the file system is always case preserving.
3747 *
3748 * In mixed sensitivity mode case sensitive behavior
3749 * is the default. FIGNORECASE must be used to
3750 * explicitly request case insensitive behavior.
3751 *
3752 * If the source and target names provided differ only
3753 * by case (e.g., a request to rename 'tim' to 'Tim'),
3754 * we will treat this as a special case in the
3755 * case-insensitive mode: as long as the source name
3756 * is an exact match, we will allow this to proceed as
3757 * a name-change request.
3758 */
3759 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3760 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3761 flags & FIGNORECASE)) &&
3762 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3763 &error) == 0) {
3764 /*
3765 * case preserving rename request, require exact
3766 * name matches
3767 */
3768 zflg |= ZCIEXACT;
3769 zflg &= ~ZCILOOK;
3770 }
3771 }
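/*
 * Example of the ordering above (illustrative): with sdzp->z_id == 7
 * and tdzp->z_id == 3, cmp > 0 and the target directory entry is
 * locked first; only when the object ids tie does the case-blind name
 * comparison decide the order.
 */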
3772
3773 /*
3774 * If the source and destination directories are the same, we should
3775 * grab the z_name_lock of that directory only once.
3776 */
3777 if (sdzp == tdzp) {
3778 zflg |= ZHAVELOCK;
3779 rw_enter(&sdzp->z_name_lock, RW_READER);
3780 }
3781
3782 if (cmp < 0) {
3783 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3784 ZEXISTS | zflg, NULL, NULL);
3785 terr = zfs_dirent_lock(&tdl,
3786 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3787 } else {
3788 terr = zfs_dirent_lock(&tdl,
3789 tdzp, tnm, &tzp, zflg, NULL, NULL);
3790 serr = zfs_dirent_lock(&sdl,
3791 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3792 NULL, NULL);
3793 }
3794
3795 if (serr) {
3796 /*
3797 * Source entry invalid or not there.
3798 */
3799 if (!terr) {
3800 zfs_dirent_unlock(tdl);
3801 if (tzp)
3802 iput(ZTOI(tzp));
3803 }
3804
3805 if (sdzp == tdzp)
3806 rw_exit(&sdzp->z_name_lock);
3807
3808 if (strcmp(snm, "..") == 0)
3809 serr = EINVAL;
3810 ZFS_EXIT(zfsvfs);
3811 return (serr);
3812 }
3813 if (terr) {
3814 zfs_dirent_unlock(sdl);
3815 iput(ZTOI(szp));
3816
3817 if (sdzp == tdzp)
3818 rw_exit(&sdzp->z_name_lock);
3819
3820 if (strcmp(tnm, "..") == 0)
3821 terr = EINVAL;
3822 ZFS_EXIT(zfsvfs);
3823 return (terr);
3824 }
3825
3826 /*
3827 * If project inheritance is in use, i.e. the directory has
3828 * ZFS_PROJINHERIT set, then its descendant directories inherit
3829 * not only the project ID but also the ZFS_PROJINHERIT flag. In
3830 * that case, we only allow renames into our tree when the project
3831 * IDs are the same.
3832 */
3833 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3834 tdzp->z_projid != szp->z_projid) {
3835 error = SET_ERROR(EXDEV);
3836 goto out;
3837 }
3838
3839 /*
3840 * Must have write access at the source to remove the old entry
3841 * and write access at the target to create the new entry.
3842 * Note that if target and source are the same, this can be
3843 * done in a single check.
3844 */
3845
3846 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3847 goto out;
3848
3849 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3850 /*
3851 * Check to make sure rename is valid.
3852 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3853 */
3854 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3855 goto out;
3856 }
3857
3858 /*
3859 * Does target exist?
3860 */
3861 if (tzp) {
3862 /*
3863 * Source and target must be the same type.
3864 */
3865 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3866 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3867 error = SET_ERROR(ENOTDIR);
3868 goto out;
3869 }
3870 } else {
3871 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3872 error = SET_ERROR(EISDIR);
3873 goto out;
3874 }
3875 }
3876 /*
3877 * POSIX dictates that when the source and target
3878 * entries refer to the same file object, rename
3879 * must do nothing and exit without error.
3880 */
3881 if (szp->z_id == tzp->z_id) {
3882 error = 0;
3883 goto out;
3884 }
3885 }
3886
3887 tx = dmu_tx_create(zfsvfs->z_os);
3888 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3889 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3890 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3891 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3892 if (sdzp != tdzp) {
3893 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3894 zfs_sa_upgrade_txholds(tx, tdzp);
3895 }
3896 if (tzp) {
3897 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3898 zfs_sa_upgrade_txholds(tx, tzp);
3899 }
3900
3901 zfs_sa_upgrade_txholds(tx, szp);
3902 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3903 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3904 if (error) {
3905 if (zl != NULL)
3906 zfs_rename_unlock(&zl);
3907 zfs_dirent_unlock(sdl);
3908 zfs_dirent_unlock(tdl);
3909
3910 if (sdzp == tdzp)
3911 rw_exit(&sdzp->z_name_lock);
3912
3913 if (error == ERESTART) {
3914 waited = B_TRUE;
3915 dmu_tx_wait(tx);
3916 dmu_tx_abort(tx);
3917 iput(ZTOI(szp));
3918 if (tzp)
3919 iput(ZTOI(tzp));
3920 goto top;
3921 }
3922 dmu_tx_abort(tx);
3923 iput(ZTOI(szp));
3924 if (tzp)
3925 iput(ZTOI(tzp));
3926 ZFS_EXIT(zfsvfs);
3927 return (error);
3928 }
3929
3930 if (tzp) /* Attempt to remove the existing target */
3931 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3932
3933 if (error == 0) {
3934 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3935 if (error == 0) {
3936 szp->z_pflags |= ZFS_AV_MODIFIED;
3937 if (tdzp->z_pflags & ZFS_PROJINHERIT)
3938 szp->z_pflags |= ZFS_PROJINHERIT;
3939
3940 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3941 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3942 ASSERT0(error);
3943
3944 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3945 if (error == 0) {
3946 zfs_log_rename(zilog, tx, TX_RENAME |
3947 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3948 sdl->dl_name, tdzp, tdl->dl_name, szp);
3949 } else {
3950 /*
3951 * At this point, we have successfully created
3952 * the target name, but have failed to remove
3953 * the source name. Since the create was done
3954 * with the ZRENAMING flag, there are
3955 * complications; for one, the link count is
3956 * wrong. The easiest way to deal with this
3957 * is to remove the newly created target, and
3958 * return the original error. This must
3959 * succeed; fortunately, it is very unlikely to
3960 * fail, since we just created it.
3961 */
3962 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3963 ZRENAMING, NULL), ==, 0);
3964 }
3965 } else {
3966 /*
3967 * If we had removed the existing target, the subsequent
3968 * call to zfs_link_create() to add back the same entry,
3969 * but with the new dnode (szp), should not fail.
3970 */
3971 ASSERT(tzp == NULL);
3972 }
3973 }
3974
3975 dmu_tx_commit(tx);
3976 out:
3977 if (zl != NULL)
3978 zfs_rename_unlock(&zl);
3979
3980 zfs_dirent_unlock(sdl);
3981 zfs_dirent_unlock(tdl);
3982
3983 zfs_inode_update(sdzp);
3984 if (sdzp == tdzp)
3985 rw_exit(&sdzp->z_name_lock);
3986
3987 if (sdzp != tdzp)
3988 zfs_inode_update(tdzp);
3989
3990 zfs_inode_update(szp);
3991 iput(ZTOI(szp));
3992 if (tzp) {
3993 zfs_inode_update(tzp);
3994 iput(ZTOI(tzp));
3995 }
3996
3997 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3998 zil_commit(zilog, 0);
3999
4000 ZFS_EXIT(zfsvfs);
4001 return (error);
4002 }
4003
4004 /*
4005 * Insert the indicated symbolic reference entry into the directory.
4006 *
4007 * IN: dip - Directory to contain new symbolic link.
4008 * name - Name of new symlink entry.
4009 * vap - Attributes of new entry.
4010 * link - Target path of new symlink.
4011 * cr - credentials of caller.
4012 * flags - case flags.
4013 *
4014 * OUT: ipp - inode of new symlink entry.
4015 * RETURN: 0 on success, error code on failure.
4016 *
4017 * Timestamps:
4018 * dip - ctime|mtime updated
4019 */
4020 /*ARGSUSED*/
4021 int
4022 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
4023 struct inode **ipp, cred_t *cr, int flags)
4024 {
4025 znode_t *zp, *dzp = ITOZ(dip);
4026 zfs_dirlock_t *dl;
4027 dmu_tx_t *tx;
4028 zfsvfs_t *zfsvfs = ITOZSB(dip);
4029 zilog_t *zilog;
4030 uint64_t len = strlen(link);
4031 int error;
4032 int zflg = ZNEW;
4033 zfs_acl_ids_t acl_ids;
4034 boolean_t fuid_dirtied;
4035 uint64_t txtype = TX_SYMLINK;
4036 boolean_t waited = B_FALSE;
4037
4038 ASSERT(S_ISLNK(vap->va_mode));
4039
4040 if (name == NULL)
4041 return (SET_ERROR(EINVAL));
4042
4043 ZFS_ENTER(zfsvfs);
4044 ZFS_VERIFY_ZP(dzp);
4045 zilog = zfsvfs->z_log;
4046
4047 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4048 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4049 ZFS_EXIT(zfsvfs);
4050 return (SET_ERROR(EILSEQ));
4051 }
4052 if (flags & FIGNORECASE)
4053 zflg |= ZCILOOK;
4054
4055 if (len > MAXPATHLEN) {
4056 ZFS_EXIT(zfsvfs);
4057 return (SET_ERROR(ENAMETOOLONG));
4058 }
4059
4060 if ((error = zfs_acl_ids_create(dzp, 0,
4061 vap, cr, NULL, &acl_ids)) != 0) {
4062 ZFS_EXIT(zfsvfs);
4063 return (error);
4064 }
4065 top:
4066 *ipp = NULL;
4067
4068 /*
4069 * Attempt to lock directory; fail if entry already exists.
4070 */
4071 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
4072 if (error) {
4073 zfs_acl_ids_free(&acl_ids);
4074 ZFS_EXIT(zfsvfs);
4075 return (error);
4076 }
4077
4078 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4079 zfs_acl_ids_free(&acl_ids);
4080 zfs_dirent_unlock(dl);
4081 ZFS_EXIT(zfsvfs);
4082 return (error);
4083 }
4084
4085 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
4086 zfs_acl_ids_free(&acl_ids);
4087 zfs_dirent_unlock(dl);
4088 ZFS_EXIT(zfsvfs);
4089 return (SET_ERROR(EDQUOT));
4090 }
4091 tx = dmu_tx_create(zfsvfs->z_os);
4092 fuid_dirtied = zfsvfs->z_fuid_dirty;
4093 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4094 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4095 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4096 ZFS_SA_BASE_ATTR_SIZE + len);
4097 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4098 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4099 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4100 acl_ids.z_aclp->z_acl_bytes);
4101 }
4102 if (fuid_dirtied)
4103 zfs_fuid_txhold(zfsvfs, tx);
4104 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4105 if (error) {
4106 zfs_dirent_unlock(dl);
4107 if (error == ERESTART) {
4108 waited = B_TRUE;
4109 dmu_tx_wait(tx);
4110 dmu_tx_abort(tx);
4111 goto top;
4112 }
4113 zfs_acl_ids_free(&acl_ids);
4114 dmu_tx_abort(tx);
4115 ZFS_EXIT(zfsvfs);
4116 return (error);
4117 }
4118
4119 /*
4120 * Create a new object for the symlink.
4121 * For version 4 ZPL datasets the symlink will be an SA attribute.
4122 */
4123 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4124
4125 if (fuid_dirtied)
4126 zfs_fuid_sync(zfsvfs, tx);
4127
4128 mutex_enter(&zp->z_lock);
4129 if (zp->z_is_sa)
4130 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4131 link, len, tx);
4132 else
4133 zfs_sa_symlink(zp, link, len, tx);
4134 mutex_exit(&zp->z_lock);
4135
4136 zp->z_size = len;
4137 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4138 &zp->z_size, sizeof (zp->z_size), tx);
4139 /*
4140 * Insert the new object into the directory.
4141 */
4142 error = zfs_link_create(dl, zp, tx, ZNEW);
4143 if (error != 0) {
4144 zfs_znode_delete(zp, tx);
4145 remove_inode_hash(ZTOI(zp));
4146 } else {
4147 if (flags & FIGNORECASE)
4148 txtype |= TX_CI;
4149 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4150
4151 zfs_inode_update(dzp);
4152 zfs_inode_update(zp);
4153 }
4154
4155 zfs_acl_ids_free(&acl_ids);
4156
4157 dmu_tx_commit(tx);
4158
4159 zfs_dirent_unlock(dl);
4160
4161 if (error == 0) {
4162 *ipp = ZTOI(zp);
4163
4164 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4165 zil_commit(zilog, 0);
4166 } else {
4167 iput(ZTOI(zp));
4168 }
4169
4170 ZFS_EXIT(zfsvfs);
4171 return (error);
4172 }
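/*
 * Illustrative only: a minimal sketch of how a ZPL-layer caller might
 * drive zfs_symlink(). The in-tree caller is zpl_symlink(); the exact
 * vattr setup below is an assumption for illustration, not a copy.
 *
 *	vattr_t *vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
 *	vap->va_mask = ATTR_MODE;
 *	vap->va_mode = S_IFLNK | S_IRWXUGO;
 *	error = zfs_symlink(dir, (char *)dentry->d_name.name, vap,
 *	    (char *)target, &ip, cr, 0);
 *	kmem_free(vap, sizeof (vattr_t));
 */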
4173
4174 /*
4175 * Return, in the buffer contained in the provided uio structure,
4176 * the symbolic path referred to by ip.
4177 *
4178 * IN: ip - inode of symbolic link
4179 * uio - structure to contain the link path.
4180 * cr - credentials of caller.
4181 *
4182 * RETURN: 0 if success
4183 * error code if failure
4184 *
4185 * Timestamps:
4186 * ip - atime updated
4187 */
4188 /* ARGSUSED */
4189 int
4190 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
4191 {
4192 znode_t *zp = ITOZ(ip);
4193 zfsvfs_t *zfsvfs = ITOZSB(ip);
4194 int error;
4195
4196 ZFS_ENTER(zfsvfs);
4197 ZFS_VERIFY_ZP(zp);
4198
4199 mutex_enter(&zp->z_lock);
4200 if (zp->z_is_sa)
4201 error = sa_lookup_uio(zp->z_sa_hdl,
4202 SA_ZPL_SYMLINK(zfsvfs), uio);
4203 else
4204 error = zfs_sa_readlink(zp, uio);
4205 mutex_exit(&zp->z_lock);
4206
4207 ZFS_EXIT(zfsvfs);
4208 return (error);
4209 }
4210
4211 /*
4212 * Insert a new entry into directory tdip referencing sip.
4213 *
4214 * IN: tdip - Directory to contain new entry.
4215 * sip - inode of new entry.
4216 * name - name of new entry.
4217 * cr - credentials of caller.
4218 *
4219 * RETURN: 0 if success
4220 * error code if failure
4221 *
4222 * Timestamps:
4223 * tdip - ctime|mtime updated
4224 * sip - ctime updated
4225 */
4226 /* ARGSUSED */
4227 int
4228 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
4229 int flags)
4230 {
4231 znode_t *dzp = ITOZ(tdip);
4232 znode_t *tzp, *szp;
4233 zfsvfs_t *zfsvfs = ITOZSB(tdip);
4234 zilog_t *zilog;
4235 zfs_dirlock_t *dl;
4236 dmu_tx_t *tx;
4237 int error;
4238 int zf = ZNEW;
4239 uint64_t parent;
4240 uid_t owner;
4241 boolean_t waited = B_FALSE;
4242 boolean_t is_tmpfile = 0;
4243 uint64_t txg;
4244 #ifdef HAVE_TMPFILE
4245 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
4246 #endif
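/*
 * Illustrative only: a tmpfile typically reaches this function when
 * userspace creates an anonymous file and then links it into the
 * namespace, e.g.:
 *
 *	fd = open("/tank/dir", O_TMPFILE | O_WRONLY, 0600);
 *	linkat(fd, "", AT_FDCWD, "/tank/dir/name", AT_EMPTY_PATH);
 *
 * The VFS sets I_LINKABLE for O_TMPFILE opens without O_EXCL, which
 * is what the is_tmpfile check above relies on.
 */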
4247 ASSERT(S_ISDIR(tdip->i_mode));
4248
4249 if (name == NULL)
4250 return (SET_ERROR(EINVAL));
4251
4252 ZFS_ENTER(zfsvfs);
4253 ZFS_VERIFY_ZP(dzp);
4254 zilog = zfsvfs->z_log;
4255
4256 /*
4257 * POSIX dictates that we return EPERM here.
4258 * Better choices include ENOTSUP or EISDIR.
4259 */
4260 if (S_ISDIR(sip->i_mode)) {
4261 ZFS_EXIT(zfsvfs);
4262 return (SET_ERROR(EPERM));
4263 }
4264
4265 szp = ITOZ(sip);
4266 ZFS_VERIFY_ZP(szp);
4267
4268 /*
4269 * If we are using project inheritance, meaning the directory has
4270 * ZFS_PROJINHERIT set, then its descendant directories will inherit
4271 * not only the project ID but also the ZFS_PROJINHERIT flag. In
4272 * that case, we only allow hard link creation within our tree when
4273 * the project IDs are the same.
4274 */
4275 if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
4276 ZFS_EXIT(zfsvfs);
4277 return (SET_ERROR(EXDEV));
4278 }
4279
4280 /*
4281 * We check i_sb because snapshots and the ctldir must have different
4282 * super blocks.
4283 */
4284 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
4285 ZFS_EXIT(zfsvfs);
4286 return (SET_ERROR(EXDEV));
4287 }
4288
4289 /* Prevent links to .zfs/shares files */
4290
4291 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4292 &parent, sizeof (uint64_t))) != 0) {
4293 ZFS_EXIT(zfsvfs);
4294 return (error);
4295 }
4296 if (parent == zfsvfs->z_shares_dir) {
4297 ZFS_EXIT(zfsvfs);
4298 return (SET_ERROR(EPERM));
4299 }
4300
4301 if (zfsvfs->z_utf8 && u8_validate(name,
4302 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4303 ZFS_EXIT(zfsvfs);
4304 return (SET_ERROR(EILSEQ));
4305 }
4306 if (flags & FIGNORECASE)
4307 zf |= ZCILOOK;
4308
4309 /*
4310 * We do not support links between attributes and non-attributes
4311 * because of the potential security risk of creating links
4312 * into "normal" file space in order to circumvent restrictions
4313 * imposed in attribute space.
4314 */
4315 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4316 ZFS_EXIT(zfsvfs);
4317 return (SET_ERROR(EINVAL));
4318 }
4319
4320 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4321 cr, ZFS_OWNER);
4322 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4323 ZFS_EXIT(zfsvfs);
4324 return (SET_ERROR(EPERM));
4325 }
4326
4327 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4328 ZFS_EXIT(zfsvfs);
4329 return (error);
4330 }
4331
4332 top:
4333 /*
4334 * Attempt to lock directory; fail if entry already exists.
4335 */
4336 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4337 if (error) {
4338 ZFS_EXIT(zfsvfs);
4339 return (error);
4340 }
4341
4342 tx = dmu_tx_create(zfsvfs->z_os);
4343 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4344 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4345 if (is_tmpfile)
4346 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4347
4348 zfs_sa_upgrade_txholds(tx, szp);
4349 zfs_sa_upgrade_txholds(tx, dzp);
4350 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4351 if (error) {
4352 zfs_dirent_unlock(dl);
4353 if (error == ERESTART) {
4354 waited = B_TRUE;
4355 dmu_tx_wait(tx);
4356 dmu_tx_abort(tx);
4357 goto top;
4358 }
4359 dmu_tx_abort(tx);
4360 ZFS_EXIT(zfsvfs);
4361 return (error);
4362 }
4363 /* Unmark z_unlinked so zfs_link_create() will not reject it */
4364 if (is_tmpfile)
4365 szp->z_unlinked = 0;
4366 error = zfs_link_create(dl, szp, tx, 0);
4367
4368 if (error == 0) {
4369 uint64_t txtype = TX_LINK;
4370 /*
4371 * A tmpfile is created in z_unlinkedobj, so remove it from there.
4372 * Also, we don't log to the ZIL, because all previous file
4373 * operations on the tmpfile are ignored by the ZIL. Instead we
4374 * always wait for the txg to sync to make sure all previous
4375 * operations are safely on disk.
4376 */
4377 if (is_tmpfile) {
4378 VERIFY(zap_remove_int(zfsvfs->z_os,
4379 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
4380 } else {
4381 if (flags & FIGNORECASE)
4382 txtype |= TX_CI;
4383 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4384 }
4385 } else if (is_tmpfile) {
4386 /* Restore z_unlinked since linking failed */
4387 szp->z_unlinked = 1;
4388 }
4389 txg = dmu_tx_get_txg(tx);
4390 dmu_tx_commit(tx);
4391
4392 zfs_dirent_unlock(dl);
4393
4394 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4395 zil_commit(zilog, 0);
4396
4397 if (is_tmpfile)
4398 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
4399
4400 zfs_inode_update(dzp);
4401 zfs_inode_update(szp);
4402 ZFS_EXIT(zfsvfs);
4403 return (error);
4404 }
4405
4406 static void
4407 zfs_putpage_commit_cb(void *arg)
4408 {
4409 struct page *pp = arg;
4410
4411 ClearPageError(pp);
4412 end_page_writeback(pp);
4413 }
4414
4415 /*
4416 * Push a page out to disk. Once the page is on stable storage the
4417 * registered commit callback will be run as notification of completion.
4418 *
4419 * IN: ip - page mapped for inode.
4420 * pp - page to push (page is locked)
4421 * wbc - writeback control data
4422 *
4423 * RETURN: 0 if success
4424 * error code if failure
4425 *
4426 * Timestamps:
4427 * ip - ctime|mtime updated
4428 */
4429 /* ARGSUSED */
4430 int
4431 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
4432 {
4433 znode_t *zp = ITOZ(ip);
4434 zfsvfs_t *zfsvfs = ITOZSB(ip);
4435 loff_t offset;
4436 loff_t pgoff;
4437 unsigned int pglen;
4438 rl_t *rl;
4439 dmu_tx_t *tx;
4440 caddr_t va;
4441 int err = 0;
4442 uint64_t mtime[2], ctime[2];
4443 sa_bulk_attr_t bulk[3];
4444 int cnt = 0;
4445 struct address_space *mapping;
4446
4447 ZFS_ENTER(zfsvfs);
4448 ZFS_VERIFY_ZP(zp);
4449
4450 ASSERT(PageLocked(pp));
4451
4452 pgoff = page_offset(pp); /* Page byte-offset in file */
4453 offset = i_size_read(ip); /* File length in bytes */
4454 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4455 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
4456
4457 /* Page is beyond end of file */
4458 if (pgoff >= offset) {
4459 unlock_page(pp);
4460 ZFS_EXIT(zfsvfs);
4461 return (0);
4462 }
4463
4464 /* Truncate page length to end of file */
4465 if (pgoff + pglen > offset)
4466 pglen = offset - pgoff;
4467
4468 #if 0
4469 /*
4470 * FIXME: mmap writes are currently allowed past the quota. The
4471 * correct fix is to register a page_mkwrite() handler to count
4472 * the page against its quota when it is about to be dirtied.
4473 */
4474 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
4475 KUID_TO_SUID(ip->i_uid)) ||
4476 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
4477 KGID_TO_SGID(ip->i_gid)) ||
4478 (zp->z_projid != ZFS_DEFAULT_PROJID &&
4479 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
4480 zp->z_projid))) {
4481 err = EDQUOT;
4482 }
4483 #endif
4484
4485 /*
4486 * The ordering here is critical and must adhere to the following
4487 * rules in order to avoid deadlocking in either zfs_read() or
4488 * zfs_free_range() due to a lock inversion.
4489 *
4490 * 1) The page must be unlocked prior to acquiring the range lock.
4491 * This is critical because zfs_read() calls find_lock_page()
4492 * which may block on the page lock while holding the range lock.
4493 *
4494 * 2) Before setting or clearing write back on a page the range lock
4495 * must be held in order to prevent a lock inversion with the
4496 * zfs_free_range() function.
4497 *
4498 * This presents a problem because upon entering this function the
4499 * page lock is already held. To safely acquire the range lock the
4500 * page lock must be dropped. This creates a window where another
4501 * process could truncate, invalidate, dirty, or write out the page.
4502 *
4503 * Therefore, after successfully reacquiring the range and page locks
4504 * the current page state is checked. In the common case everything
4505 * will be as is expected and it can be written out. However, if
4506 * the page state has changed it must be handled accordingly.
4507 */
4508 mapping = pp->mapping;
4509 redirty_page_for_writepage(wbc, pp);
4510 unlock_page(pp);
4511
4512 rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
4513 lock_page(pp);
4514
4515 /* Page mapping changed or it is no longer dirty; we're done */
4516 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4517 unlock_page(pp);
4518 zfs_range_unlock(rl);
4519 ZFS_EXIT(zfsvfs);
4520 return (0);
4521 }
4522
4523 /* Another process started writeback on the page; block if required */
4524 if (PageWriteback(pp)) {
4525 unlock_page(pp);
4526 zfs_range_unlock(rl);
4527
4528 if (wbc->sync_mode != WB_SYNC_NONE)
4529 wait_on_page_writeback(pp);
4530
4531 ZFS_EXIT(zfsvfs);
4532 return (0);
4533 }
4534
4535 /* Clear the dirty flag now that the required locks are held */
4536 if (!clear_page_dirty_for_io(pp)) {
4537 unlock_page(pp);
4538 zfs_range_unlock(rl);
4539 ZFS_EXIT(zfsvfs);
4540 return (0);
4541 }
4542
4543 /*
4544 * Counterpart for redirty_page_for_writepage() above. This page
4545 * was in fact not skipped and should not be counted as if it were.
4546 */
4547 wbc->pages_skipped--;
4548 set_page_writeback(pp);
4549 unlock_page(pp);
4550
4551 tx = dmu_tx_create(zfsvfs->z_os);
4552 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
4553 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4554 zfs_sa_upgrade_txholds(tx, zp);
4555
4556 err = dmu_tx_assign(tx, TXG_NOWAIT);
4557 if (err != 0) {
4558 if (err == ERESTART)
4559 dmu_tx_wait(tx);
4560
4561 dmu_tx_abort(tx);
4562 __set_page_dirty_nobuffers(pp);
4563 ClearPageError(pp);
4564 end_page_writeback(pp);
4565 zfs_range_unlock(rl);
4566 ZFS_EXIT(zfsvfs);
4567 return (err);
4568 }
4569
4570 va = kmap(pp);
4571 ASSERT3U(pglen, <=, PAGE_SIZE);
4572 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
4573 kunmap(pp);
4574
4575 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4576 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4577 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4578 &zp->z_pflags, 8);
4579
4580 /* Preserve the mtime and ctime provided by the inode */
4581 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4582 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4583 zp->z_atime_dirty = 0;
4584 zp->z_seq++;
4585
4586 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4587
4588 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
4589 zfs_putpage_commit_cb, pp);
4590 dmu_tx_commit(tx);
4591
4592 zfs_range_unlock(rl);
4593
4594 if (wbc->sync_mode != WB_SYNC_NONE) {
4595 /*
4596 * Note that this is rarely called under writepages(), because
4597 * writepages() normally handles the entire commit for
4598 * performance reasons.
4599 */
4600 zil_commit(zfsvfs->z_log, zp->z_id);
4601 }
4602
4603 ZFS_EXIT(zfsvfs);
4604 return (err);
4605 }
4606
4607 /*
4608 * Update the system attributes when the inode has been dirtied. For the
4609 * moment we only update the mode, atime, mtime, and ctime.
4610 */
4611 int
4612 zfs_dirty_inode(struct inode *ip, int flags)
4613 {
4614 znode_t *zp = ITOZ(ip);
4615 zfsvfs_t *zfsvfs = ITOZSB(ip);
4616 dmu_tx_t *tx;
4617 uint64_t mode, atime[2], mtime[2], ctime[2];
4618 sa_bulk_attr_t bulk[4];
4619 int error = 0;
4620 int cnt = 0;
4621
4622 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
4623 return (0);
4624
4625 ZFS_ENTER(zfsvfs);
4626 ZFS_VERIFY_ZP(zp);
4627
4628 #ifdef I_DIRTY_TIME
4629 /*
4630 * This is the lazytime semantic introduced in Linux 4.0.
4631 * This flag is only passed by update_time() when lazytime is enabled.
4632 * (Note: I_DIRTY_SYNC will also be set when not using lazytime.)
4633 * Fortunately mtime and ctime are managed within ZFS itself, so we
4634 * only need to dirty atime.
4635 */
4636 if (flags == I_DIRTY_TIME) {
4637 zp->z_atime_dirty = 1;
4638 goto out;
4639 }
4640 #endif
4641
4642 tx = dmu_tx_create(zfsvfs->z_os);
4643
4644 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4645 zfs_sa_upgrade_txholds(tx, zp);
4646
4647 error = dmu_tx_assign(tx, TXG_WAIT);
4648 if (error) {
4649 dmu_tx_abort(tx);
4650 goto out;
4651 }
4652
4653 mutex_enter(&zp->z_lock);
4654 zp->z_atime_dirty = 0;
4655
4656 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4657 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4658 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4659 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4660
4661 /* Preserve the mode, mtime and ctime provided by the inode */
4662 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4663 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4664 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4665 mode = ip->i_mode;
4666
4667 zp->z_mode = mode;
4668
4669 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4670 mutex_exit(&zp->z_lock);
4671
4672 dmu_tx_commit(tx);
4673 out:
4674 ZFS_EXIT(zfsvfs);
4675 return (error);
4676 }
4677
4678 /*ARGSUSED*/
4679 void
4680 zfs_inactive(struct inode *ip)
4681 {
4682 znode_t *zp = ITOZ(ip);
4683 zfsvfs_t *zfsvfs = ITOZSB(ip);
4684 uint64_t atime[2];
4685 int error;
4686 int need_unlock = 0;
4687
4688 /* Only read lock if we haven't already write locked, e.g. rollback */
4689 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
4690 need_unlock = 1;
4691 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4692 }
4693 if (zp->z_sa_hdl == NULL) {
4694 if (need_unlock)
4695 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4696 return;
4697 }
4698
4699 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4700 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4701
4702 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4703 zfs_sa_upgrade_txholds(tx, zp);
4704 error = dmu_tx_assign(tx, TXG_WAIT);
4705 if (error) {
4706 dmu_tx_abort(tx);
4707 } else {
4708 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4709 mutex_enter(&zp->z_lock);
4710 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4711 (void *)&atime, sizeof (atime), tx);
4712 zp->z_atime_dirty = 0;
4713 mutex_exit(&zp->z_lock);
4714 dmu_tx_commit(tx);
4715 }
4716 }
4717
4718 zfs_zinactive(zp);
4719 if (need_unlock)
4720 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4721 }
4722
4723 /*
4724 * Bounds-check the seek operation.
4725 *
4726 * IN: ip - inode seeking within
4727 * ooff - old file offset
4728 * noffp - pointer to new file offset
4729 * ct - caller context
4730 *
4731 * RETURN: 0 if success
4732 * EINVAL if new offset invalid
4733 */
4734 /* ARGSUSED */
4735 int
4736 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4737 {
4738 if (S_ISDIR(ip->i_mode))
4739 return (0);
4740 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4741 }
4742
4743 /*
4744 * Fill pages with data from the disk.
4745 */
4746 static int
4747 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4748 {
4749 znode_t *zp = ITOZ(ip);
4750 zfsvfs_t *zfsvfs = ITOZSB(ip);
4751 objset_t *os;
4752 struct page *cur_pp;
4753 u_offset_t io_off, total;
4754 size_t io_len;
4755 loff_t i_size;
4756 unsigned page_idx;
4757 int err;
4758
4759 os = zfsvfs->z_os;
4760 io_len = nr_pages << PAGE_SHIFT;
4761 i_size = i_size_read(ip);
4762 io_off = page_offset(pl[0]);
4763
4764 if (io_off + io_len > i_size)
4765 io_len = i_size - io_off;
4766
4767 /*
4768 * Iterate over list of pages and read each page individually.
4769 */
4770 page_idx = 0;
4771 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4772 caddr_t va;
4773
4774 cur_pp = pl[page_idx++];
4775 va = kmap(cur_pp);
4776 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4777 DMU_READ_PREFETCH);
4778 kunmap(cur_pp);
4779 if (err) {
4780 /* convert checksum errors into IO errors */
4781 if (err == ECKSUM)
4782 err = SET_ERROR(EIO);
4783 return (err);
4784 }
4785 }
4786
4787 return (0);
4788 }
4789
4790 /*
4791 * Uses zfs_fillpage to read data from the file and fill the pages.
4792 *
4793 * IN: ip - inode of file to get data from.
4794 * pl - list of pages to read
4795 * nr_pages - number of pages to read
4796 *
4797 * RETURN: 0 on success, error code on failure.
4798 *
4799 * Timestamps:
4800 * ip - atime updated
4801 */
4802 /* ARGSUSED */
4803 int
4804 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4805 {
4806 znode_t *zp = ITOZ(ip);
4807 zfsvfs_t *zfsvfs = ITOZSB(ip);
4808 int err;
4809
4810 if (pl == NULL)
4811 return (0);
4812
4813 ZFS_ENTER(zfsvfs);
4814 ZFS_VERIFY_ZP(zp);
4815
4816 err = zfs_fillpage(ip, pl, nr_pages);
4817
4818 ZFS_EXIT(zfsvfs);
4819 return (err);
4820 }
4821
4822 /*
4823 * Check ZFS specific permissions to memory map a section of a file.
4824 *
4825 * IN: ip - inode of the file to mmap
4826 * off - file offset
4827 * addrp - start address in memory region
4828 * len - length of memory region
4829 * vm_flags - address flags
4830 *
4831 * RETURN: 0 if success
4832 * error code if failure
4833 */
4834 /*ARGSUSED*/
4835 int
4836 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4837 unsigned long vm_flags)
4838 {
4839 znode_t *zp = ITOZ(ip);
4840 zfsvfs_t *zfsvfs = ITOZSB(ip);
4841
4842 ZFS_ENTER(zfsvfs);
4843 ZFS_VERIFY_ZP(zp);
4844
4845 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4846 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4847 ZFS_EXIT(zfsvfs);
4848 return (SET_ERROR(EPERM));
4849 }
4850
4851 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4852 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4853 ZFS_EXIT(zfsvfs);
4854 return (SET_ERROR(EACCES));
4855 }
4856
4857 if (off < 0 || len > MAXOFFSET_T - off) {
4858 ZFS_EXIT(zfsvfs);
4859 return (SET_ERROR(ENXIO));
4860 }
4861
4862 ZFS_EXIT(zfsvfs);
4863 return (0);
4864 }
4865
4866 /*
4867 * convoff - Convert the offset in lckdat (l_start, l_whence) so
4868 * that it is expressed relative to the given whence.
4869 */
4870 int
4871 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4872 {
4873 vattr_t vap;
4874 int error;
4875
4876 if ((lckdat->l_whence == 2) || (whence == 2)) {
4877 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
4878 return (error);
4879 }
4880
4881 switch (lckdat->l_whence) {
4882 case 1:
4883 lckdat->l_start += offset;
4884 break;
4885 case 2:
4886 lckdat->l_start += vap.va_size;
4887 /* FALLTHRU */
4888 case 0:
4889 break;
4890 default:
4891 return (SET_ERROR(EINVAL));
4892 }
4893
4894 if (lckdat->l_start < 0)
4895 return (SET_ERROR(EINVAL));
4896
4897 switch (whence) {
4898 case 1:
4899 lckdat->l_start -= offset;
4900 break;
4901 case 2:
4902 lckdat->l_start -= vap.va_size;
4903 /* FALLTHRU */
4904 case 0:
4905 break;
4906 default:
4907 return (SET_ERROR(EINVAL));
4908 }
4909
4910 lckdat->l_whence = (short)whence;
4911 return (0);
4912 }
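/*
 * Illustrative only: given a flock64_t with l_whence = 1 (SEEK_CUR)
 * and l_start = 100, convoff(ip, lckdat, 0, 4096) first rewrites the
 * offset as absolute (l_start = 4196) and, because the target whence
 * is 0 (SEEK_SET), leaves it there with l_whence = 0.
 */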
4913
4914 /*
4915 * Free or allocate space in a file. Currently, this function only
4916 * supports the `F_FREESP' command. However, this command is somewhat
4917 * misnamed, as its functionality includes the ability to allocate as
4918 * well as free space.
4919 *
4920 * IN: ip - inode of file to free data in.
4921 * cmd - action to take (only F_FREESP supported).
4922 * bfp - section of file to free/alloc.
4923 * flag - current file open mode flags.
4924 * offset - current file offset.
4925 * cr - credentials of caller [UNUSED].
4926 *
4927 * RETURN: 0 on success, error code on failure.
4928 *
4929 * Timestamps:
4930 * ip - ctime|mtime updated
4931 */
4932 /* ARGSUSED */
4933 int
4934 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4935 offset_t offset, cred_t *cr)
4936 {
4937 znode_t *zp = ITOZ(ip);
4938 zfsvfs_t *zfsvfs = ITOZSB(ip);
4939 uint64_t off, len;
4940 int error;
4941
4942 ZFS_ENTER(zfsvfs);
4943 ZFS_VERIFY_ZP(zp);
4944
4945 if (cmd != F_FREESP) {
4946 ZFS_EXIT(zfsvfs);
4947 return (SET_ERROR(EINVAL));
4948 }
4949
4950 /*
4951 * Callers might not be able to detect properly that we are read-only,
4952 * so check it explicitly here.
4953 */
4954 if (zfs_is_readonly(zfsvfs)) {
4955 ZFS_EXIT(zfsvfs);
4956 return (SET_ERROR(EROFS));
4957 }
4958
4959 if ((error = convoff(ip, bfp, 0, offset))) {
4960 ZFS_EXIT(zfsvfs);
4961 return (error);
4962 }
4963
4964 if (bfp->l_len < 0) {
4965 ZFS_EXIT(zfsvfs);
4966 return (SET_ERROR(EINVAL));
4967 }
4968
4969 /*
4970 * Permissions aren't checked on Solaris because on this OS
4971 * zfs_space() can only be called with an opened file handle.
4972 * On Linux we can get here through truncate_range() which
4973 * operates directly on inodes, so we need to check access rights.
4974 */
4975 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4976 ZFS_EXIT(zfsvfs);
4977 return (error);
4978 }
4979
4980 off = bfp->l_start;
4981 len = bfp->l_len; /* 0 means from off to end of file */
4982
4983 error = zfs_freesp(zp, off, len, flag, TRUE);
4984
4985 ZFS_EXIT(zfsvfs);
4986 return (error);
4987 }
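/*
 * Illustrative only: on Linux this function is typically reached via
 * fallocate(2) hole punching, with the ZPL filling in a flock64_t
 * along these lines (a sketch, not the exact in-tree code):
 *
 *	flock64_t bf;
 *
 *	bf.l_type = F_WRLCK;
 *	bf.l_whence = 0;		(SEEK_SET)
 *	bf.l_start = offset;
 *	bf.l_len = len;			(0 means from offset to EOF)
 *	bf.l_pid = 0;
 *	error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, cr);
 */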
4988
4989 /*ARGSUSED*/
4990 int
4991 zfs_fid(struct inode *ip, fid_t *fidp)
4992 {
4993 znode_t *zp = ITOZ(ip);
4994 zfsvfs_t *zfsvfs = ITOZSB(ip);
4995 uint32_t gen;
4996 uint64_t gen64;
4997 uint64_t object = zp->z_id;
4998 zfid_short_t *zfid;
4999 int size, i, error;
5000
5001 ZFS_ENTER(zfsvfs);
5002 ZFS_VERIFY_ZP(zp);
5003
5004 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
5005 &gen64, sizeof (uint64_t))) != 0) {
5006 ZFS_EXIT(zfsvfs);
5007 return (error);
5008 }
5009
5010 gen = (uint32_t)gen64;
5011
5012 size = SHORT_FID_LEN;
5013
5014 zfid = (zfid_short_t *)fidp;
5015
5016 zfid->zf_len = size;
5017
5018 for (i = 0; i < sizeof (zfid->zf_object); i++)
5019 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
5020
5021 /* Must have a non-zero generation number to distinguish from .zfs */
5022 if (gen == 0)
5023 gen = 1;
5024 for (i = 0; i < sizeof (zfid->zf_gen); i++)
5025 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
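/*
 * Illustrative only: for object 0xA1B2C3 and gen 1 the loops above
 * pack the values little-endian, so zf_object[] holds
 * { 0xC3, 0xB2, 0xA1, 0x00, 0x00, 0x00 } and zf_gen[] holds
 * { 0x01, 0x00, 0x00, 0x00 }.
 */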
5026
5027 ZFS_EXIT(zfsvfs);
5028 return (0);
5029 }
5030
5031 /*ARGSUSED*/
5032 int
5033 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5034 {
5035 znode_t *zp = ITOZ(ip);
5036 zfsvfs_t *zfsvfs = ITOZSB(ip);
5037 int error;
5038 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5039
5040 ZFS_ENTER(zfsvfs);
5041 ZFS_VERIFY_ZP(zp);
5042 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5043 ZFS_EXIT(zfsvfs);
5044
5045 return (error);
5046 }
5047
5048 /*ARGSUSED*/
5049 int
5050 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5051 {
5052 znode_t *zp = ITOZ(ip);
5053 zfsvfs_t *zfsvfs = ITOZSB(ip);
5054 int error;
5055 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5056 zilog_t *zilog = zfsvfs->z_log;
5057
5058 ZFS_ENTER(zfsvfs);
5059 ZFS_VERIFY_ZP(zp);
5060
5061 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5062
5063 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5064 zil_commit(zilog, 0);
5065
5066 ZFS_EXIT(zfsvfs);
5067 return (error);
5068 }
5069
5070 #ifdef HAVE_UIO_ZEROCOPY
5071 /*
5072 * Tunable, both must be a power of 2.
5073 *
5074 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
5075 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
5076 * an arcbuf for a partial block read
5077 */
5078 int zcr_blksz_min = (1 << 10); /* 1K */
5079 int zcr_blksz_max = (1 << 17); /* 128K */
5080
5081 /*ARGSUSED*/
5082 static int
5083 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
5084 {
5085 znode_t *zp = ITOZ(ip);
5086 zfsvfs_t *zfsvfs = ITOZSB(ip);
5087 int max_blksz = zfsvfs->z_max_blksz;
5088 uio_t *uio = &xuio->xu_uio;
5089 ssize_t size = uio->uio_resid;
5090 offset_t offset = uio->uio_loffset;
5091 int blksz;
5092 int fullblk, i;
5093 arc_buf_t *abuf;
5094 ssize_t maxsize;
5095 int preamble, postamble;
5096
5097 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5098 return (SET_ERROR(EINVAL));
5099
5100 ZFS_ENTER(zfsvfs);
5101 ZFS_VERIFY_ZP(zp);
5102 switch (ioflag) {
5103 case UIO_WRITE:
5104 /*
5105 * Loan out an arc_buf for write if the write size is at least
5106 * max_blksz and the file's block size is also max_blksz.
5107 */
5108 blksz = max_blksz;
5109 if (size < blksz || zp->z_blksz != blksz) {
5110 ZFS_EXIT(zfsvfs);
5111 return (SET_ERROR(EINVAL));
5112 }
5113 /*
5114 * Caller requests buffers for write before knowing where the
5115 * write offset might be (e.g. NFS TCP write).
5116 */
5117 if (offset == -1) {
5118 preamble = 0;
5119 } else {
5120 preamble = P2PHASE(offset, blksz);
5121 if (preamble) {
5122 preamble = blksz - preamble;
5123 size -= preamble;
5124 }
5125 }
5126
5127 postamble = P2PHASE(size, blksz);
5128 size -= postamble;
5129
5130 fullblk = size / blksz;
5131 (void) dmu_xuio_init(xuio,
5132 (preamble != 0) + fullblk + (postamble != 0));
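/*
 * Worked example (illustrative): with blksz = 128K, a write of
 * size = 320K at offset = 192K gives P2PHASE(192K, 128K) = 64K,
 * so preamble = 128K - 64K = 64K and size drops to 256K. Then
 * postamble = P2PHASE(256K, 128K) = 0 and fullblk = 2, so
 * dmu_xuio_init() is asked for 1 + 2 + 0 = 3 arc_bufs.
 */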
5133
5134 /*
5135 * Have to fix iov base/len for partial buffers. They
5136 * currently represent full arc_buf's.
5137 */
5138 if (preamble) {
5139 /* data begins in the middle of the arc_buf */
5140 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5141 blksz);
5142 ASSERT(abuf);
5143 (void) dmu_xuio_add(xuio, abuf,
5144 blksz - preamble, preamble);
5145 }
5146
5147 for (i = 0; i < fullblk; i++) {
5148 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5149 blksz);
5150 ASSERT(abuf);
5151 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
5152 }
5153
5154 if (postamble) {
5155 /* data ends in the middle of the arc_buf */
5156 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5157 blksz);
5158 ASSERT(abuf);
5159 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
5160 }
5161 break;
5162 case UIO_READ:
5163 /*
5164 * Loan out an arc_buf for read if the read size is larger than
5165 * the current file block size. Block alignment is not
5166 * considered. A partial arc_buf will be loaned out for the read.
5167 */
5168 blksz = zp->z_blksz;
5169 if (blksz < zcr_blksz_min)
5170 blksz = zcr_blksz_min;
5171 if (blksz > zcr_blksz_max)
5172 blksz = zcr_blksz_max;
5173 /* avoid potential complexity of dealing with it */
5174 if (blksz > max_blksz) {
5175 ZFS_EXIT(zfsvfs);
5176 return (SET_ERROR(EINVAL));
5177 }
5178
5179 maxsize = zp->z_size - uio->uio_loffset;
5180 if (size > maxsize)
5181 size = maxsize;
5182
5183 if (size < blksz) {
5184 ZFS_EXIT(zfsvfs);
5185 return (SET_ERROR(EINVAL));
5186 }
5187 break;
5188 default:
5189 ZFS_EXIT(zfsvfs);
5190 return (SET_ERROR(EINVAL));
5191 }
5192
5193 uio->uio_extflg = UIO_XUIO;
5194 XUIO_XUZC_RW(xuio) = ioflag;
5195 ZFS_EXIT(zfsvfs);
5196 return (0);
5197 }
5198
5199 /*ARGSUSED*/
5200 static int
5201 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
5202 {
5203 int i;
5204 arc_buf_t *abuf;
5205 int ioflag = XUIO_XUZC_RW(xuio);
5206
5207 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5208
5209 i = dmu_xuio_cnt(xuio);
5210 while (i-- > 0) {
5211 abuf = dmu_xuio_arcbuf(xuio, i);
5212 /*
5213 * If abuf == NULL, it must be a write buffer
5214 * that has already been returned in zfs_write().
5215 */
5216 if (abuf)
5217 dmu_return_arcbuf(abuf);
5218 ASSERT(abuf || ioflag == UIO_WRITE);
5219 }
5220
5221 dmu_xuio_fini(xuio);
5222 return (0);
5223 }
5224 #endif /* HAVE_UIO_ZEROCOPY */
5225
5226 #if defined(_KERNEL)
5227 EXPORT_SYMBOL(zfs_open);
5228 EXPORT_SYMBOL(zfs_close);
5229 EXPORT_SYMBOL(zfs_read);
5230 EXPORT_SYMBOL(zfs_write);
5231 EXPORT_SYMBOL(zfs_access);
5232 EXPORT_SYMBOL(zfs_lookup);
5233 EXPORT_SYMBOL(zfs_create);
5234 EXPORT_SYMBOL(zfs_tmpfile);
5235 EXPORT_SYMBOL(zfs_remove);
5236 EXPORT_SYMBOL(zfs_mkdir);
5237 EXPORT_SYMBOL(zfs_rmdir);
5238 EXPORT_SYMBOL(zfs_readdir);
5239 EXPORT_SYMBOL(zfs_fsync);
5240 EXPORT_SYMBOL(zfs_getattr);
5241 EXPORT_SYMBOL(zfs_getattr_fast);
5242 EXPORT_SYMBOL(zfs_setattr);
5243 EXPORT_SYMBOL(zfs_rename);
5244 EXPORT_SYMBOL(zfs_symlink);
5245 EXPORT_SYMBOL(zfs_readlink);
5246 EXPORT_SYMBOL(zfs_link);
5247 EXPORT_SYMBOL(zfs_inactive);
5248 EXPORT_SYMBOL(zfs_space);
5249 EXPORT_SYMBOL(zfs_fid);
5250 EXPORT_SYMBOL(zfs_getsecattr);
5251 EXPORT_SYMBOL(zfs_setsecattr);
5252 EXPORT_SYMBOL(zfs_getpage);
5253 EXPORT_SYMBOL(zfs_putpage);
5254 EXPORT_SYMBOL(zfs_dirty_inode);
5255 EXPORT_SYMBOL(zfs_map);
5256
5257 /* CSTYLED */
5258 module_param(zfs_delete_blocks, ulong, 0644);
5259 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
5260 module_param(zfs_read_chunk_size, long, 0644);
5261 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
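/*
 * Illustrative only: parameters declared with perm 0644 above can be
 * inspected and tuned at runtime through sysfs, e.g.:
 *
 *	echo 40960 > /sys/module/zfs/parameters/zfs_delete_blocks
 */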
5262 #endif