/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/mode.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done, avoiding races, by using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory. Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes. Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx, then use zfs_iput_async().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign(). This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system. The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again. On subsequent
 *	calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks. This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */

/*
 * Virus scanning is unsupported. It would be possible to add a hook
 * here to perform the required virus scan. This could be done entirely
 * in the kernel or potentially as an update to invoke a scanning
 * utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
	return (0);
}

/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}
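
/*
 * For example (a userspace sketch, assuming no other O_SYNC opens of
 * the same file), zfs_open() and zfs_close() pair up to maintain
 * z_sync_cnt, so a file opened with O_SYNC keeps a nonzero count for
 * the life of that open:
 *
 *	fd = open(path, O_WRONLY | O_SYNC);	// z_sync_cnt: 0 -> 1
 *	write(fd, buf, len);			// logged as synchronous
 *	close(fd);				// z_sync_cnt: 1 -> 0
 */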
242
243 #if defined(SEEK_HOLE) && defined(SEEK_DATA)
244 /*
245 * Lseek support for finding holes (cmd == SEEK_HOLE) and
246 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
247 */
248 static int
249 zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
250 {
251 znode_t *zp = ITOZ(ip);
252 uint64_t noff = (uint64_t)*off; /* new offset */
253 uint64_t file_sz;
254 int error;
255 boolean_t hole;
256
257 file_sz = zp->z_size;
258 if (noff >= file_sz) {
259 return (SET_ERROR(ENXIO));
260 }
261
262 if (cmd == SEEK_HOLE)
263 hole = B_TRUE;
264 else
265 hole = B_FALSE;
266
267 error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
268
269 if (error == ESRCH)
270 return (SET_ERROR(ENXIO));
271
272 /* file was dirty, so fall back to using generic logic */
273 if (error == EBUSY) {
274 if (hole)
275 *off = file_sz;
276
277 return (0);
278 }
279
280 /*
281 * We could find a hole that begins after the logical end-of-file,
282 * because dmu_offset_next() only works on whole blocks. If the
283 * EOF falls mid-block, then indicate that the "virtual hole"
284 * at the end of the file begins at the logical EOF, rather than
285 * at the end of the last block.
286 */
287 if (noff > file_sz) {
288 ASSERT(hole);
289 noff = file_sz;
290 }
291
292 if (noff < *off)
293 return (error);
294 *off = noff;
295 return (error);
296 }
297
298 int
299 zfs_holey(struct inode *ip, int cmd, loff_t *off)
300 {
301 znode_t *zp = ITOZ(ip);
302 zfsvfs_t *zfsvfs = ITOZSB(ip);
303 int error;
304
305 ZFS_ENTER(zfsvfs);
306 ZFS_VERIFY_ZP(zp);
307
308 error = zfs_holey_common(ip, cmd, off);
309
310 ZFS_EXIT(zfsvfs);
311 return (error);
312 }
313 #endif /* SEEK_HOLE && SEEK_DATA */
314
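/*
 * For example, zfs_holey() is reached via lseek(2) with SEEK_DATA or
 * SEEK_HOLE; a sparse-file copier might walk a file like this
 * (userspace sketch):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first data byte
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // next hole (or EOF)
 *	// copy [data, hole), then repeat from "hole" until lseek
 *	// fails with ENXIO (offset at or beyond end-of-file)
 */
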
#if defined(_KERNEL)
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	uint64_t nbytes;
	int64_t off;
	void *pb;

	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		nbytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, oid, start+off, nbytes, pb+off,
			    DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			SetPageUptodate(pp);
			ClearPageError(pp);
			unlock_page(pp);
			put_page(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we read from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	znode_t *zp = ITOZ(ip);
	int64_t	start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;

	start = uio->uio_loffset;
	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		bytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));
			unlock_page(pp);

			pb = kmap(pp);
			error = uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			put_page(pp);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
#endif /* _KERNEL */

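/*
 * Coherency example for update_pages()/mappedread() above (a sketch,
 * assuming a single PAGE_SIZE-aligned page): process A has the file
 * mmapped while process B calls write(2). The write path copies B's
 * data into the DMU buffer and then update_pages() rewrites the mapped
 * page, so both views agree:
 *
 *	B: write(fd, buf, PAGE_SIZE)
 *	    -> dmu_write_uio_dbuf()	// DMU copy updated
 *	    -> update_pages()		// mapped page updated too
 *	A: *(char *)addr		// sees B's data
 *
 * A read(2) goes the other way: mappedread() prefers the (possibly
 * dirty) mapped page over the DMU copy.
 */
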
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	boolean_t frsync = B_FALSE;

	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to FSYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
	    uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
	ssize_t start_resid = n;

#ifdef HAVE_UIO_ZEROCOPY
	xuio_t *xuio = NULL;
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if ((ISP2(blksz))) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			error = mappedread(ip, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	rangelock_exit(lr);

	ZFS_EXIT(zfsvfs);
	return (error);
}

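/*
 * Worked example of the read chunking above (illustrative numbers):
 * with zfs_read_chunk_size = 1M, a 3M read starting at offset 1.5M is
 * issued as 512K (up to the 2M boundary), then 1M, 1M, and 512K,
 * because each pass transfers at most
 *
 *	MIN(n, zfs_read_chunk_size - P2PHASE(offset, zfs_read_chunk_size))
 *
 * bytes and therefore never crosses a chunk boundary. This bounds the
 * size of any single dmu_read_uio_dbuf() call.
 */
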
/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = uio->uio_resid;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	rlim64_t limit = uio->uio_limit;
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	int max_blksz = zfsvfs->z_max_blksz;
	xuio_t *xuio = NULL;

	/*
	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
#endif
	if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	locked_range_t *lr;
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics. We reset the write offset once we have the lock.
		 */
		lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	int write_eof = (woff + n > zp->z_size);

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;
#ifdef HAVE_UIO_ZEROCOPY
	int i_iov = 0;
	const iovec_t *iovp = uio->uio_iov;
	ASSERTV(int iovcnt = uio->uio_iovcnt);
#endif

	/*
	 * Write the file in reasonable size chunks. Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = uio->uio_loffset;

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
		    KUID_TO_SUID(ip->i_uid)) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
		    KGID_TO_SGID(ip->i_gid)) ||
		    (zp->z_projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    zp->z_projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		const iovec_t *aiov = NULL;
		if (xuio) {
#ifdef HAVE_UIO_ZEROCOPY
			ASSERT(i_iov < iovcnt);
			ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
#endif
		} else if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block. "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction. This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range. This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property. Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			rangelock_reduce(lr, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			uio->uio_fault_disable = B_TRUE;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			if (error == EFAULT) {
				dmu_tx_commit(tx);
				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
					break;
				}
				continue;
			} else if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf(). Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    /* cppcheck-suppress nullPointer */
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				error = dmu_assign_arcbuf_by_dbuf(
				    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
				if (error != 0) {
					dmu_return_arcbuf(abuf);
					dmu_tx_commit(tx);
					break;
				}
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			update_pages(ip, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		uint32_t uid = KUID_TO_SUID(ip->i_uid);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			ip->i_mode = newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0) {
			if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = EFAULT;
				break;
			}
		}
	}

	zfs_inode_update(zp);
	rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	int64_t nwritten = start_resid - uio->uio_resid;
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}

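/*
 * Example of the partial-write rule above (illustrative): suppose a 2M
 * write hits EDQUOT after its first 1M chunk has been committed. The
 * loop breaks with error set, but uio_resid (1M) no longer equals
 * start_resid (2M), so zfs_write() returns 0 and the caller sees a
 * short write of 1M. The error itself is returned only when no bytes
 * were written at all, or during replay.
 */
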
/*
 * Drop a reference on the passed inode asynchronously. This ensures
 * that the caller will never drop the last reference on an inode in
 * the current context. Doing so while holding open a tx could result
 * in a deadlock if iput_final() re-enters the filesystem code.
 */
void
zfs_iput_async(struct inode *ip)
{
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	if (atomic_read(&ip->i_count) == 1)
		VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
		    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
	else
		iput(ip);
}

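/*
 * Usage sketch for Big Rule (2) (illustrative): a caller that still
 * holds an assigned tx must not drop what may be the last inode
 * reference directly:
 *
 *	tx = dmu_tx_create(os);
 *	...
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	zfs_iput_async(ZTOI(zp));	// not iput(): iput_final() may
 *					// need a new tx and deadlock
 *	dmu_tx_commit(tx);
 */
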
/* ARGSUSED */
void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_iput_async(ZTOI(zp));

	kmem_free(zgd, sizeof (zgd_t));
}

#ifdef DEBUG
static int zil_fault_io = 0;
#endif

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_iput_async(ZTOI(zp));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf. We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY. We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}

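/*
 * Flow sketch (illustrative): zfs_get_data() is the ZIL's get-data
 * callback for TX_WRITE records that were logged by reference rather
 * than by copy. An immediate record just reads the data into "buf";
 * an indirect record has dmu_sync() write the block, and zfs_get_done()
 * later releases the dbuf, the range lock, and the inode reference:
 *
 *	zil_commit()
 *	  -> zfs_get_data()			// locate and lock data
 *	       -> dmu_sync(..., zfs_get_done, zgd)
 *	            ... write I/O completes ...
 *	            -> zfs_get_done()		// teardown in callback
 */
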
/*ARGSUSED*/
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	NA
 */
/* ARGSUSED */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed in name. This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
		}
	}

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Attempt to create a new entry in a directory. If the entry
 * already exists, truncate the file if permissible, else return
 * an error. Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- file flag.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */

/* ARGSUSED */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t *zilog;
	objset_t *os;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	int error;
	uid_t uid;
	gid_t gid;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;
	boolean_t waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		igrab(dip);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;
		uint64_t projid = ZFS_DEFAULT_PROJID;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */

		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
			projid = zfs_inherit_projid(dzp);
		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}

		error = dmu_tx_assign(tx,
		    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		error = zfs_link_create(dl, zp, tx, ZNEW);
		if (error != 0) {
			/*
			 * Since we failed to add the directory entry for it,
			 * delete the newly created dnode.
			 */
			zfs_znode_delete(zp, tx);
			remove_inode_hash(ZTOI(zp));
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_commit(tx);
			goto out;
		}

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			if (dl) {
				zfs_dirent_unlock(dl);
				dl = NULL;
			}
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t *zp = NULL, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	objset_t *os;
	dmu_tx_t *tx;
	int error;
	uid_t uid;
	gid_t gid;
	zfs_acl_ids_t acl_ids;
	uint64_t projid = ZFS_DEFAULT_PROJID;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;
	boolean_t waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		goto out;
	}

	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids)) != 0)
		goto out;
	have_acl = B_TRUE;

	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
		projid = zfs_inherit_projid(dzp);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/* Add to unlinked set */
	zp->z_unlinked = 1;
	zfs_unlinked_add(zp, tx);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
out:

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

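/*
 * Illustrative note: zfs_tmpfile() backs O_TMPFILE opens. The new file
 * is born in the unlinked set with no directory entry, so it vanishes
 * when the last reference is dropped unless it is first linked into
 * the namespace, e.g. (userspace sketch, fd is the O_TMPFILE fd):
 *
 *	fd = open(dir, O_TMPFILE | O_WRONLY, 0600);
 *	write(fd, buf, len);
 *	snprintf(p, sizeof (p), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, p, AT_FDCWD, newpath, AT_SYMLINK_FOLLOW);
 */
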
/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

/*ARGSUSED*/
int
zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
{
	znode_t *zp, *dzp = ITOZ(dip);
	znode_t *xzp;
	struct inode *ip;
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t *zilog;
	uint64_t acl_obj, xattr_obj;
	uint64_t xattr_obj_unlinked = 0;
	uint64_t obj = 0;
	uint64_t links;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	boolean_t may_delete_now, delete_now = FALSE;
	boolean_t unlinked, toobig = FALSE;
	uint64_t txtype;
	pathname_t *realnmp = NULL;
	pathname_t realnm;
	int error;
	int zflg = ZEXISTS;
	boolean_t waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
		if (realnmp)
			pn_free(realnmp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		error = SET_ERROR(EPERM);
		goto out;
	}

	mutex_enter(&zp->z_lock);
	may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
	mutex_exit(&zp->z_lock);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the inode. So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT0(error);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	/*
	 * Mark this transaction as typically resulting in a net free of space
	 */
	dmu_tx_mark_netfree(tx);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			iput(ip);
			if (xzp)
				iput(ZTOI(xzp));
			goto top;
		}
		if (realnmp)
			pn_free(realnmp);
		dmu_tx_abort(tx);
		iput(ip);
		if (xzp)
			iput(ZTOI(xzp));
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed. Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
		    acl_obj;
	}

	if (delete_now) {
		if (xattr_obj_unlinked) {
			ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = 1;
			clear_nlink(ZTOI(xzp));
			links = 0;
			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &links, sizeof (links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);
			ASSERT0(error);
		}
		/*
		 * Add to the unlinked set because a new reference could be
		 * taken concurrently resulting in a deferred destruction.
		 */
		zfs_unlinked_add(zp, tx);
		mutex_exit(&zp->z_lock);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
	if (realnmp)
		pn_free(realnmp);

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);

	if (delete_now)
		iput(ip);
	else
		zfs_iput_async(ip);

	if (xzp) {
		zfs_inode_update(xzp);
		zfs_iput_async(ZTOI(xzp));
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Create a new directory and insert it into dip using the name
 * provided. Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t *zilog;
	zfs_dirlock_t *dl;
	uint64_t txtype;
	dmu_tx_t *tx;
	int error;
	int zf = ZNEW;
	uid_t uid;
	gid_t gid = crgetgid(cr);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t waited = B_FALSE;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (dirname == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*ipp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	/*
	 * Now put new name in parent dir.
	 */
	error = zfs_link_create(dl, zp, tx, ZNEW);
	if (error != 0) {
		zfs_znode_delete(zp, tx);
		remove_inode_hash(ZTOI(zp));
		goto out;
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	*ipp = ZTOI(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

out:
	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	if (error != 0) {
		iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
	}
	ZFS_EXIT(zfsvfs);
	return (error);
}

2103 /*
2104  * Remove a subdirectory entry from a directory. If the current working
2105 * directory is the same as the subdir to be removed, the
2106 * remove will fail.
2107 *
2108 * IN: dip - inode of directory to remove from.
2109 * name - name of directory to be removed.
2110 * cwd - inode of current working directory.
2111 * cr - credentials of caller.
2112 * flags - case flags
2113 *
2114 * RETURN: 0 on success, error code on failure.
2115 *
2116 * Timestamps:
2117 * dip - ctime|mtime updated
2118 */
2119 /*ARGSUSED*/
2120 int
2121 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2122 int flags)
2123 {
2124 znode_t *dzp = ITOZ(dip);
2125 znode_t *zp;
2126 struct inode *ip;
2127 zfsvfs_t *zfsvfs = ITOZSB(dip);
2128 zilog_t *zilog;
2129 zfs_dirlock_t *dl;
2130 dmu_tx_t *tx;
2131 int error;
2132 int zflg = ZEXISTS;
2133 boolean_t waited = B_FALSE;
2134
2135 if (name == NULL)
2136 return (SET_ERROR(EINVAL));
2137
2138 ZFS_ENTER(zfsvfs);
2139 ZFS_VERIFY_ZP(dzp);
2140 zilog = zfsvfs->z_log;
2141
2142 if (flags & FIGNORECASE)
2143 zflg |= ZCILOOK;
2144 top:
2145 zp = NULL;
2146
2147 /*
2148 * Attempt to lock directory; fail if entry doesn't exist.
2149 */
2150 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2151 NULL, NULL))) {
2152 ZFS_EXIT(zfsvfs);
2153 return (error);
2154 }
2155
2156 ip = ZTOI(zp);
2157
2158 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
2159 goto out;
2160 }
2161
2162 if (!S_ISDIR(ip->i_mode)) {
2163 error = SET_ERROR(ENOTDIR);
2164 goto out;
2165 }
2166
2167 if (ip == cwd) {
2168 error = SET_ERROR(EINVAL);
2169 goto out;
2170 }
2171
2172 /*
2173 * Grab a lock on the directory to make sure that no one is
2174 * trying to add (or lookup) entries while we are removing it.
2175 */
2176 rw_enter(&zp->z_name_lock, RW_WRITER);
2177
2178 /*
2179 * Grab a lock on the parent pointer to make sure we play well
2180 * with the treewalk and directory rename code.
2181 */
2182 rw_enter(&zp->z_parent_lock, RW_WRITER);
2183
2184 tx = dmu_tx_create(zfsvfs->z_os);
2185 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2186 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2187 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2188 zfs_sa_upgrade_txholds(tx, zp);
2189 zfs_sa_upgrade_txholds(tx, dzp);
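/*
 * Removing an entry is a net free of space; marking the transaction
 * netfree allows it to be assigned even when the pool is nearly full.
 */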
2190 dmu_tx_mark_netfree(tx);
2191 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2192 if (error) {
2193 rw_exit(&zp->z_parent_lock);
2194 rw_exit(&zp->z_name_lock);
2195 zfs_dirent_unlock(dl);
2196 if (error == ERESTART) {
2197 waited = B_TRUE;
2198 dmu_tx_wait(tx);
2199 dmu_tx_abort(tx);
2200 iput(ip);
2201 goto top;
2202 }
2203 dmu_tx_abort(tx);
2204 iput(ip);
2205 ZFS_EXIT(zfsvfs);
2206 return (error);
2207 }
2208
2209 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2210
2211 if (error == 0) {
2212 uint64_t txtype = TX_RMDIR;
2213 if (flags & FIGNORECASE)
2214 txtype |= TX_CI;
2215 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2216 }
2217
2218 dmu_tx_commit(tx);
2219
2220 rw_exit(&zp->z_parent_lock);
2221 rw_exit(&zp->z_name_lock);
2222 out:
2223 zfs_dirent_unlock(dl);
2224
2225 zfs_inode_update(dzp);
2226 zfs_inode_update(zp);
2227 iput(ip);
2228
2229 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2230 zil_commit(zilog, 0);
2231
2232 ZFS_EXIT(zfsvfs);
2233 return (error);
2234 }
2235
2236 /*
2237 * Read as many directory entries as will fit into the provided
2238 * dirent buffer from the given directory cursor position.
2239 *
2240 * IN: ip - inode of directory to read.
2241 * dirent - buffer for directory entries.
2242 *
2243  * OUT:	dirent	- buffer filled with directory entries.
2244 *
2245 * RETURN: 0 if success
2246 * error code if failure
2247 *
2248 * Timestamps:
2249 * ip - atime updated
2250 *
2251  * Note that the low 4 bits of the cookie returned by the ZAP are always zero.
2252 * This allows us to use the low range for "special" directory entries:
2253 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2254 * we use the offset 2 for the '.zfs' directory.
2255 */
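/*
 * Since real ZAP cookies always have their low four bits clear, the
 * values 1-3 can never refer to an actual entry; the cursor logic
 * below therefore starts from the beginning of the directory for any
 * position <= 3 and resumes from the serialized cookie otherwise.
 */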
2256 /* ARGSUSED */
2257 int
2258 zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
2259 {
2260 znode_t *zp = ITOZ(ip);
2261 zfsvfs_t *zfsvfs = ITOZSB(ip);
2262 objset_t *os;
2263 zap_cursor_t zc;
2264 zap_attribute_t zap;
2265 int error;
2266 uint8_t prefetch;
2267 uint8_t type;
2268 int done = 0;
2269 uint64_t parent;
2270 uint64_t offset; /* must be unsigned; checks for < 1 */
2271
2272 ZFS_ENTER(zfsvfs);
2273 ZFS_VERIFY_ZP(zp);
2274
2275 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2276 &parent, sizeof (parent))) != 0)
2277 goto out;
2278
2279 /*
2280 	 * Quit if the directory has been removed (POSIX).
2281 */
2282 if (zp->z_unlinked)
2283 goto out;
2284
2285 error = 0;
2286 os = zfsvfs->z_os;
2287 offset = ctx->pos;
2288 prefetch = zp->z_zn_prefetch;
2289
2290 /*
2291 * Initialize the iterator cursor.
2292 */
2293 if (offset <= 3) {
2294 /*
2295 * Start iteration from the beginning of the directory.
2296 */
2297 zap_cursor_init(&zc, os, zp->z_id);
2298 } else {
2299 /*
2300 * The offset is a serialized cursor.
2301 */
2302 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2303 }
2304
2305 /*
2306 * Transform to file-system independent format
2307 */
2308 while (!done) {
2309 uint64_t objnum;
2310 /*
2311 * Special case `.', `..', and `.zfs'.
2312 */
2313 if (offset == 0) {
2314 (void) strcpy(zap.za_name, ".");
2315 zap.za_normalization_conflict = 0;
2316 objnum = zp->z_id;
2317 type = DT_DIR;
2318 } else if (offset == 1) {
2319 (void) strcpy(zap.za_name, "..");
2320 zap.za_normalization_conflict = 0;
2321 objnum = parent;
2322 type = DT_DIR;
2323 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2324 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2325 zap.za_normalization_conflict = 0;
2326 objnum = ZFSCTL_INO_ROOT;
2327 type = DT_DIR;
2328 } else {
2329 /*
2330 * Grab next entry.
2331 */
2332 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2333 if (error == ENOENT)
2334 break;
2335 else
2336 goto update;
2337 }
2338
2339 /*
2340 * Allow multiple entries provided the first entry is
2341 * the object id. Non-zpl consumers may safely make
2342 * use of the additional space.
2343 *
2344 * XXX: This should be a feature flag for compatibility
2345 */
2346 if (zap.za_integer_length != 8 ||
2347 zap.za_num_integers == 0) {
2348 cmn_err(CE_WARN, "zap_readdir: bad directory "
2349 "entry, obj = %lld, offset = %lld, "
2350 "length = %d, num = %lld\n",
2351 (u_longlong_t)zp->z_id,
2352 (u_longlong_t)offset,
2353 zap.za_integer_length,
2354 (u_longlong_t)zap.za_num_integers);
2355 error = SET_ERROR(ENXIO);
2356 goto update;
2357 }
2358
2359 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2360 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2361 }
2362
2363 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
2364 objnum, type);
2365 if (done)
2366 break;
2367
2368 /* Prefetch znode */
2369 if (prefetch) {
2370 dmu_prefetch(os, objnum, 0, 0, 0,
2371 ZIO_PRIORITY_SYNC_READ);
2372 }
2373
2374 /*
2375 * Move to the next entry, fill in the previous offset.
2376 */
2377 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2378 zap_cursor_advance(&zc);
2379 offset = zap_cursor_serialize(&zc);
2380 } else {
2381 offset += 1;
2382 }
2383 ctx->pos = offset;
2384 }
2385 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2386
2387 update:
2388 zap_cursor_fini(&zc);
2389 if (error == ENOENT)
2390 error = 0;
2391 out:
2392 ZFS_EXIT(zfsvfs);
2393
2394 return (error);
2395 }
2396
2397 ulong_t zfs_fsync_sync_cnt = 4;
2398
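/*
 * Push the file's dirty, synchronous data to stable storage by
 * committing the object's intent log records, unless the dataset is
 * mounted with sync=disabled.  The zfs_fsync_sync_cnt hint above is
 * published in thread-specific data for the duration of the commit.
 *
 * IN:	ip	- inode of file to be synced.
 *	syncflag	- sync flags (currently unused here).
 *	cr	- credentials of caller.
 *
 * RETURN:	0 (always succeeds)
 */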
2399 int
2400 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2401 {
2402 znode_t *zp = ITOZ(ip);
2403 zfsvfs_t *zfsvfs = ITOZSB(ip);
2404
2405 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2406
2407 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2408 ZFS_ENTER(zfsvfs);
2409 ZFS_VERIFY_ZP(zp);
2410 zil_commit(zfsvfs->z_log, zp->z_id);
2411 ZFS_EXIT(zfsvfs);
2412 }
2413 tsd_set(zfs_fsyncer_key, NULL);
2414
2415 return (0);
2416 }
2417
2418
2419 /*
2420 * Get the requested file attributes and place them in the provided
2421 * vattr structure.
2422 *
2423 * IN: ip - inode of file.
2424 * vap - va_mask identifies requested attributes.
2425 * If ATTR_XVATTR set, then optional attrs are requested
2426 * flags - ATTR_NOACLCHECK (CIFS server context)
2427 * cr - credentials of caller.
2428 *
2429 * OUT: vap - attribute values.
2430 *
2431 * RETURN: 0 (always succeeds)
2432 */
2433 /* ARGSUSED */
2434 int
2435 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2436 {
2437 znode_t *zp = ITOZ(ip);
2438 zfsvfs_t *zfsvfs = ITOZSB(ip);
2439 int error = 0;
2440 uint64_t links;
2441 uint64_t atime[2], mtime[2], ctime[2];
2442 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2443 xoptattr_t *xoap = NULL;
2444 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2445 sa_bulk_attr_t bulk[3];
2446 int count = 0;
2447
2448 ZFS_ENTER(zfsvfs);
2449 ZFS_VERIFY_ZP(zp);
2450
2451 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2452
2453 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2454 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2455 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2456
2457 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2458 ZFS_EXIT(zfsvfs);
2459 return (error);
2460 }
2461
2462 /*
2463 	 * If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES.
2464 	 * Also, if we are the owner, don't bother, since the owner should
2465 	 * always be allowed to read the basic attributes of the file.
2466 */
2467 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2468 (vap->va_uid != crgetuid(cr))) {
2469 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2470 skipaclchk, cr))) {
2471 ZFS_EXIT(zfsvfs);
2472 return (error);
2473 }
2474 }
2475
2476 /*
2477 * Return all attributes. It's cheaper to provide the answer
2478 * than to determine whether we were asked the question.
2479 */
2480
2481 mutex_enter(&zp->z_lock);
2482 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2483 vap->va_mode = zp->z_mode;
2484 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2485 vap->va_nodeid = zp->z_id;
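/* The hidden '.zfs' directory counts as an extra link on the root. */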
2486 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2487 links = ZTOI(zp)->i_nlink + 1;
2488 else
2489 links = ZTOI(zp)->i_nlink;
2490 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2491 vap->va_size = i_size_read(ip);
2492 vap->va_rdev = ip->i_rdev;
2493 vap->va_seq = ip->i_generation;
2494
2495 /*
2496 * Add in any requested optional attributes and the create time.
2497 * Also set the corresponding bits in the returned attribute bitmap.
2498 */
2499 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2500 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2501 xoap->xoa_archive =
2502 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2503 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2504 }
2505
2506 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2507 xoap->xoa_readonly =
2508 ((zp->z_pflags & ZFS_READONLY) != 0);
2509 XVA_SET_RTN(xvap, XAT_READONLY);
2510 }
2511
2512 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2513 xoap->xoa_system =
2514 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2515 XVA_SET_RTN(xvap, XAT_SYSTEM);
2516 }
2517
2518 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2519 xoap->xoa_hidden =
2520 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2521 XVA_SET_RTN(xvap, XAT_HIDDEN);
2522 }
2523
2524 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2525 xoap->xoa_nounlink =
2526 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2527 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2528 }
2529
2530 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2531 xoap->xoa_immutable =
2532 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2533 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2534 }
2535
2536 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2537 xoap->xoa_appendonly =
2538 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2539 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2540 }
2541
2542 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2543 xoap->xoa_nodump =
2544 ((zp->z_pflags & ZFS_NODUMP) != 0);
2545 XVA_SET_RTN(xvap, XAT_NODUMP);
2546 }
2547
2548 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2549 xoap->xoa_opaque =
2550 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2551 XVA_SET_RTN(xvap, XAT_OPAQUE);
2552 }
2553
2554 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2555 xoap->xoa_av_quarantined =
2556 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2557 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2558 }
2559
2560 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2561 xoap->xoa_av_modified =
2562 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2563 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2564 }
2565
2566 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2567 S_ISREG(ip->i_mode)) {
2568 zfs_sa_get_scanstamp(zp, xvap);
2569 }
2570
2571 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2572 uint64_t times[2];
2573
2574 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2575 times, sizeof (times));
2576 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2577 XVA_SET_RTN(xvap, XAT_CREATETIME);
2578 }
2579
2580 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2581 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2582 XVA_SET_RTN(xvap, XAT_REPARSE);
2583 }
2584 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2585 xoap->xoa_generation = ip->i_generation;
2586 XVA_SET_RTN(xvap, XAT_GEN);
2587 }
2588
2589 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2590 xoap->xoa_offline =
2591 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2592 XVA_SET_RTN(xvap, XAT_OFFLINE);
2593 }
2594
2595 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2596 xoap->xoa_sparse =
2597 ((zp->z_pflags & ZFS_SPARSE) != 0);
2598 XVA_SET_RTN(xvap, XAT_SPARSE);
2599 }
2600
2601 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2602 xoap->xoa_projinherit =
2603 ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
2604 XVA_SET_RTN(xvap, XAT_PROJINHERIT);
2605 }
2606
2607 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2608 xoap->xoa_projid = zp->z_projid;
2609 XVA_SET_RTN(xvap, XAT_PROJID);
2610 }
2611 }
2612
2613 ZFS_TIME_DECODE(&vap->va_atime, atime);
2614 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2615 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2616
2617 mutex_exit(&zp->z_lock);
2618
2619 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2620
2621 if (zp->z_blksz == 0) {
2622 /*
2623 * Block size hasn't been set; suggest maximal I/O transfers.
2624 */
2625 vap->va_blksize = zfsvfs->z_max_blksz;
2626 }
2627
2628 ZFS_EXIT(zfsvfs);
2629 return (0);
2630 }
2631
2632 /*
2633 * Get the basic file attributes and place them in the provided kstat
2634 * structure. The inode is assumed to be the authoritative source
2635 * for most of the attributes. However, the znode currently has the
2636 * authoritative atime, blksize, and block count.
2637 *
2638 * IN: ip - inode of file.
2639 *
2640 * OUT: sp - kstat values.
2641 *
2642 * RETURN: 0 (always succeeds)
2643 */
2644 /* ARGSUSED */
2645 int
2646 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2647 {
2648 znode_t *zp = ITOZ(ip);
2649 zfsvfs_t *zfsvfs = ITOZSB(ip);
2650 uint32_t blksize;
2651 u_longlong_t nblocks;
2652
2653 ZFS_ENTER(zfsvfs);
2654 ZFS_VERIFY_ZP(zp);
2655
2656 mutex_enter(&zp->z_lock);
2657
2658 generic_fillattr(ip, sp);
2659
2660 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2661 sp->blksize = blksize;
2662 sp->blocks = nblocks;
2663
2664 if (unlikely(zp->z_blksz == 0)) {
2665 /*
2666 * Block size hasn't been set; suggest maximal I/O transfers.
2667 */
2668 sp->blksize = zfsvfs->z_max_blksz;
2669 }
2670
2671 mutex_exit(&zp->z_lock);
2672
2673 /*
2674 	 * Required to prevent the NFS client from detecting different inode
2675 	 * numbers for the snapshot root dentry before and after the snapshot mount.
2676 */
2677 if (zfsvfs->z_issnap) {
2678 if (ip->i_sb->s_root->d_inode == ip)
2679 sp->ino = ZFSCTL_INO_SNAPDIRS -
2680 dmu_objset_id(zfsvfs->z_os);
2681 }
2682
2683 ZFS_EXIT(zfsvfs);
2684
2685 return (0);
2686 }
2687
2688 /*
2689  * When changing a file's user/group/project, we need to handle not only
2690  * the main object that is assigned to the file directly, but also the
2691  * objects that the file uses via its hidden xattr directory.
2692  *
2693  * Because the xattr directory may contain many EA entries, it may be
2694  * impossible to change all of them within the one transaction that
2695  * changes the main object's user/group/project attributes. Instead, we
2696  * change them one by one via separate, independent transactions. This
2697  * may not be an ideal solution, but we have no better one yet.
2698 */
2699 static int
2700 zfs_setattr_dir(znode_t *dzp)
2701 {
2702 struct inode *dxip = ZTOI(dzp);
2703 struct inode *xip = NULL;
2704 zfsvfs_t *zfsvfs = ITOZSB(dxip);
2705 objset_t *os = zfsvfs->z_os;
2706 zap_cursor_t zc;
2707 zap_attribute_t zap;
2708 zfs_dirlock_t *dl;
2709 znode_t *zp;
2710 dmu_tx_t *tx = NULL;
2711 uint64_t uid, gid;
2712 sa_bulk_attr_t bulk[4];
2713 int count;
2714 int err;
2715
2716 zap_cursor_init(&zc, os, dzp->z_id);
2717 while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
2718 count = 0;
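/*
 * Each xattr directory entry should be a single 8-byte ZAP integer
 * (the object number plus type bits); anything else indicates a
 * damaged entry.
 */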
2719 if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
2720 err = ENXIO;
2721 break;
2722 }
2723
2724 err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
2725 ZEXISTS, NULL, NULL);
2726 if (err == ENOENT)
2727 goto next;
2728 if (err)
2729 break;
2730
2731 xip = ZTOI(zp);
2732 if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
2733 KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
2734 zp->z_projid == dzp->z_projid)
2735 goto next;
2736
2737 tx = dmu_tx_create(os);
2738 if (!(zp->z_pflags & ZFS_PROJID))
2739 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2740 else
2741 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2742
2743 err = dmu_tx_assign(tx, TXG_WAIT);
2744 if (err)
2745 break;
2746
2747 mutex_enter(&dzp->z_lock);
2748
2749 if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
2750 xip->i_uid = dxip->i_uid;
2751 uid = zfs_uid_read(dxip);
2752 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2753 &uid, sizeof (uid));
2754 }
2755
2756 if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
2757 xip->i_gid = dxip->i_gid;
2758 gid = zfs_gid_read(dxip);
2759 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
2760 &gid, sizeof (gid));
2761 }
2762
2763 if (zp->z_projid != dzp->z_projid) {
2764 if (!(zp->z_pflags & ZFS_PROJID)) {
2765 zp->z_pflags |= ZFS_PROJID;
2766 SA_ADD_BULK_ATTR(bulk, count,
2767 SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
2768 sizeof (zp->z_pflags));
2769 }
2770
2771 zp->z_projid = dzp->z_projid;
2772 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
2773 NULL, &zp->z_projid, sizeof (zp->z_projid));
2774 }
2775
2776 mutex_exit(&dzp->z_lock);
2777
2778 if (likely(count > 0)) {
2779 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2780 dmu_tx_commit(tx);
2781 } else {
2782 dmu_tx_abort(tx);
2783 }
2784 tx = NULL;
2785 if (err != 0 && err != ENOENT)
2786 break;
2787
2788 next:
2789 if (xip) {
2790 iput(xip);
2791 xip = NULL;
2792 zfs_dirent_unlock(dl);
2793 }
2794 zap_cursor_advance(&zc);
2795 }
2796
2797 if (tx)
2798 dmu_tx_abort(tx);
2799 if (xip) {
2800 iput(xip);
2801 zfs_dirent_unlock(dl);
2802 }
2803 zap_cursor_fini(&zc);
2804
2805 return (err == ENOENT ? 0 : err);
2806 }
2807
2808 /*
2809 * Set the file attributes to the values contained in the
2810 * vattr structure.
2811 *
2812 * IN: ip - inode of file to be modified.
2813 * vap - new attribute values.
2814 * If ATTR_XVATTR set, then optional attrs are being set
2815 * flags - ATTR_UTIME set if non-default time values provided.
2816 * - ATTR_NOACLCHECK (CIFS context only).
2817 * cr - credentials of caller.
2818 *
2819 * RETURN: 0 if success
2820 * error code if failure
2821 *
2822 * Timestamps:
2823 * ip - ctime updated, mtime updated if size changed.
2824 */
2825 /* ARGSUSED */
2826 int
2827 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2828 {
2829 znode_t *zp = ITOZ(ip);
2830 zfsvfs_t *zfsvfs = ITOZSB(ip);
2831 objset_t *os = zfsvfs->z_os;
2832 zilog_t *zilog;
2833 dmu_tx_t *tx;
2834 vattr_t oldva;
2835 xvattr_t *tmpxvattr;
2836 uint_t mask = vap->va_mask;
2837 uint_t saved_mask = 0;
2838 int trim_mask = 0;
2839 uint64_t new_mode;
2840 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
2841 uint64_t xattr_obj;
2842 uint64_t mtime[2], ctime[2], atime[2];
2843 uint64_t projid = ZFS_INVALID_PROJID;
2844 znode_t *attrzp;
2845 int need_policy = FALSE;
2846 int err, err2 = 0;
2847 zfs_fuid_info_t *fuidp = NULL;
2848 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2849 xoptattr_t *xoap;
2850 zfs_acl_t *aclp;
2851 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2852 boolean_t fuid_dirtied = B_FALSE;
2853 boolean_t handle_eadir = B_FALSE;
2854 sa_bulk_attr_t *bulk, *xattr_bulk;
2855 int count = 0, xattr_count = 0, bulks = 8;
2856
2857 if (mask == 0)
2858 return (0);
2859
2860 ZFS_ENTER(zfsvfs);
2861 ZFS_VERIFY_ZP(zp);
2862
2863 /*
2864 * If this is a xvattr_t, then get a pointer to the structure of
2865 * optional attributes. If this is NULL, then we have a vattr_t.
2866 */
2867 xoap = xva_getxoptattr(xvap);
2868 if (xoap != NULL && (mask & ATTR_XVATTR)) {
2869 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2870 if (!dmu_objset_projectquota_enabled(os) ||
2871 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
2872 ZFS_EXIT(zfsvfs);
2873 return (SET_ERROR(ENOTSUP));
2874 }
2875
2876 projid = xoap->xoa_projid;
2877 if (unlikely(projid == ZFS_INVALID_PROJID)) {
2878 ZFS_EXIT(zfsvfs);
2879 return (SET_ERROR(EINVAL));
2880 }
2881
2882 if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
2883 projid = ZFS_INVALID_PROJID;
2884 else
2885 need_policy = TRUE;
2886 }
2887
2888 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
2889 (xoap->xoa_projinherit !=
2890 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
2891 (!dmu_objset_projectquota_enabled(os) ||
2892 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
2893 ZFS_EXIT(zfsvfs);
2894 return (SET_ERROR(ENOTSUP));
2895 }
2896 }
2897
2898 zilog = zfsvfs->z_log;
2899
2900 /*
2901 	 * Make sure that if an ephemeral uid/gid or an xvattr is specified,
2902 	 * the file system is at the proper version level.
2903 */
2904
2905 if (zfsvfs->z_use_fuids == B_FALSE &&
2906 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2907 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2908 (mask & ATTR_XVATTR))) {
2909 ZFS_EXIT(zfsvfs);
2910 return (SET_ERROR(EINVAL));
2911 }
2912
2913 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2914 ZFS_EXIT(zfsvfs);
2915 return (SET_ERROR(EISDIR));
2916 }
2917
2918 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2919 ZFS_EXIT(zfsvfs);
2920 return (SET_ERROR(EINVAL));
2921 }
2922
2923 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2924 xva_init(tmpxvattr);
2925
2926 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2927 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2928
2929 /*
2930 * Immutable files can only alter immutable bit and atime
2931 */
2932 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2933 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2934 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2935 err = SET_ERROR(EPERM);
2936 goto out3;
2937 }
2938
2939 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2940 err = SET_ERROR(EPERM);
2941 goto out3;
2942 }
2943
2944 /*
2945 	 * Verify the timestamps don't overflow 32 bits.
2946 	 * ZFS can handle large timestamps, but 32-bit syscalls can't
2947 	 * handle times past the year-2038 limit. This check should be
2948 	 * removed once large timestamps are fully supported.
2949 */
2950 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2951 if (((mask & ATTR_ATIME) &&
2952 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2953 ((mask & ATTR_MTIME) &&
2954 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2955 err = SET_ERROR(EOVERFLOW);
2956 goto out3;
2957 }
2958 }
2959
2960 top:
2961 attrzp = NULL;
2962 aclp = NULL;
2963
2964 /* Can this be moved to before the top label? */
2965 if (zfs_is_readonly(zfsvfs)) {
2966 err = SET_ERROR(EROFS);
2967 goto out3;
2968 }
2969
2970 /*
2971 * First validate permissions
2972 */
2973
2974 if (mask & ATTR_SIZE) {
2975 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2976 if (err)
2977 goto out3;
2978
2979 /*
2980 * XXX - Note, we are not providing any open
2981 * mode flags here (like FNDELAY), so we may
2982 * block if there are locks present... this
2983 * should be addressed in openat().
2984 */
2985 /* XXX - would it be OK to generate a log record here? */
2986 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2987 if (err)
2988 goto out3;
2989 }
2990
2991 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2992 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2993 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2994 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2995 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2996 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2997 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2998 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2999 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3000 skipaclchk, cr);
3001 }
3002
3003 if (mask & (ATTR_UID|ATTR_GID)) {
3004 int idmask = (mask & (ATTR_UID|ATTR_GID));
3005 int take_owner;
3006 int take_group;
3007
3008 /*
3009 * NOTE: even if a new mode is being set,
3010 * we may clear S_ISUID/S_ISGID bits.
3011 */
3012
3013 if (!(mask & ATTR_MODE))
3014 vap->va_mode = zp->z_mode;
3015
3016 /*
3017 		 * Take ownership, or chgrp to a group we are a member of.
3018 */
3019
3020 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
3021 take_group = (mask & ATTR_GID) &&
3022 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3023
3024 /*
3025 * If both ATTR_UID and ATTR_GID are set then take_owner and
3026 * take_group must both be set in order to allow taking
3027 * ownership.
3028 *
3029 * Otherwise, send the check through secpolicy_vnode_setattr()
3030 *
3031 */
3032
3033 if (((idmask == (ATTR_UID|ATTR_GID)) &&
3034 take_owner && take_group) ||
3035 ((idmask == ATTR_UID) && take_owner) ||
3036 ((idmask == ATTR_GID) && take_group)) {
3037 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3038 skipaclchk, cr) == 0) {
3039 /*
3040 * Remove setuid/setgid for non-privileged users
3041 */
3042 (void) secpolicy_setid_clear(vap, cr);
3043 trim_mask = (mask & (ATTR_UID|ATTR_GID));
3044 } else {
3045 need_policy = TRUE;
3046 }
3047 } else {
3048 need_policy = TRUE;
3049 }
3050 }
3051
3052 mutex_enter(&zp->z_lock);
3053 oldva.va_mode = zp->z_mode;
3054 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3055 if (mask & ATTR_XVATTR) {
3056 /*
3057 * Update xvattr mask to include only those attributes
3058 * that are actually changing.
3059 *
3060 		 * The bits will be restored prior to actually setting
3061 		 * the attributes, so the caller thinks they were set.
3062 */
3063 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3064 if (xoap->xoa_appendonly !=
3065 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3066 need_policy = TRUE;
3067 } else {
3068 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3069 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
3070 }
3071 }
3072
3073 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
3074 if (xoap->xoa_projinherit !=
3075 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
3076 need_policy = TRUE;
3077 } else {
3078 XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
3079 XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
3080 }
3081 }
3082
3083 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3084 if (xoap->xoa_nounlink !=
3085 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3086 need_policy = TRUE;
3087 } else {
3088 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3089 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
3090 }
3091 }
3092
3093 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3094 if (xoap->xoa_immutable !=
3095 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3096 need_policy = TRUE;
3097 } else {
3098 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3099 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
3100 }
3101 }
3102
3103 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3104 if (xoap->xoa_nodump !=
3105 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3106 need_policy = TRUE;
3107 } else {
3108 XVA_CLR_REQ(xvap, XAT_NODUMP);
3109 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
3110 }
3111 }
3112
3113 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3114 if (xoap->xoa_av_modified !=
3115 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3116 need_policy = TRUE;
3117 } else {
3118 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3119 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
3120 }
3121 }
3122
3123 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3124 if ((!S_ISREG(ip->i_mode) &&
3125 xoap->xoa_av_quarantined) ||
3126 xoap->xoa_av_quarantined !=
3127 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3128 need_policy = TRUE;
3129 } else {
3130 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3131 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
3132 }
3133 }
3134
3135 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3136 mutex_exit(&zp->z_lock);
3137 err = SET_ERROR(EPERM);
3138 goto out3;
3139 }
3140
3141 if (need_policy == FALSE &&
3142 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3143 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3144 need_policy = TRUE;
3145 }
3146 }
3147
3148 mutex_exit(&zp->z_lock);
3149
3150 if (mask & ATTR_MODE) {
3151 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3152 err = secpolicy_setid_setsticky_clear(ip, vap,
3153 &oldva, cr);
3154 if (err)
3155 goto out3;
3156
3157 trim_mask |= ATTR_MODE;
3158 } else {
3159 need_policy = TRUE;
3160 }
3161 }
3162
3163 if (need_policy) {
3164 /*
3165 		 * If trim_mask is set, then taking ownership has been
3166 		 * granted, or write_acl is present and the user has the
3167 		 * ability to modify the mode. In that case remove
3168 		 * UID|GID and/or MODE from the mask so that
3169 		 * secpolicy_vnode_setattr() doesn't revoke them.
3170 */
3171
3172 if (trim_mask) {
3173 saved_mask = vap->va_mask;
3174 vap->va_mask &= ~trim_mask;
3175 }
3176 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
3177 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3178 if (err)
3179 goto out3;
3180
3181 if (trim_mask)
3182 vap->va_mask |= saved_mask;
3183 }
3184
3185 /*
3186 	 * secpolicy_vnode_setattr(), or taking ownership, may have
3187 	 * changed va_mask.
3188 */
3189 mask = vap->va_mask;
3190
3191 if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
3192 handle_eadir = B_TRUE;
3193 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3194 &xattr_obj, sizeof (xattr_obj));
3195
3196 if (err == 0 && xattr_obj) {
3197 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
3198 if (err)
3199 goto out2;
3200 }
3201 if (mask & ATTR_UID) {
3202 new_kuid = zfs_fuid_create(zfsvfs,
3203 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3204 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
3205 zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
3206 new_kuid)) {
3207 if (attrzp)
3208 iput(ZTOI(attrzp));
3209 err = SET_ERROR(EDQUOT);
3210 goto out2;
3211 }
3212 }
3213
3214 if (mask & ATTR_GID) {
3215 new_kgid = zfs_fuid_create(zfsvfs,
3216 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
3217 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
3218 zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
3219 new_kgid)) {
3220 if (attrzp)
3221 iput(ZTOI(attrzp));
3222 err = SET_ERROR(EDQUOT);
3223 goto out2;
3224 }
3225 }
3226
3227 if (projid != ZFS_INVALID_PROJID &&
3228 zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
3229 if (attrzp)
3230 iput(ZTOI(attrzp));
3231 err = EDQUOT;
3232 goto out2;
3233 }
3234 }
3235 tx = dmu_tx_create(os);
3236
3237 if (mask & ATTR_MODE) {
3238 uint64_t pmode = zp->z_mode;
3239 uint64_t acl_obj;
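/*
 * Keep the file type bits (S_IFMT) from the existing mode and take
 * only the permission bits from the caller.
 */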
3240 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3241
3242 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
3243
3244 mutex_enter(&zp->z_lock);
3245 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3246 /*
3247 * Are we upgrading ACL from old V0 format
3248 * to V1 format?
3249 */
3250 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3251 zfs_znode_acl_version(zp) ==
3252 ZFS_ACL_VERSION_INITIAL) {
3253 dmu_tx_hold_free(tx, acl_obj, 0,
3254 DMU_OBJECT_END);
3255 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3256 0, aclp->z_acl_bytes);
3257 } else {
3258 dmu_tx_hold_write(tx, acl_obj, 0,
3259 aclp->z_acl_bytes);
3260 }
3261 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3262 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3263 0, aclp->z_acl_bytes);
3264 }
3265 mutex_exit(&zp->z_lock);
3266 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3267 } else {
3268 if (((mask & ATTR_XVATTR) &&
3269 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
3270 (projid != ZFS_INVALID_PROJID &&
3271 !(zp->z_pflags & ZFS_PROJID)))
3272 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3273 else
3274 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3275 }
3276
3277 if (attrzp) {
3278 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3279 }
3280
3281 fuid_dirtied = zfsvfs->z_fuid_dirty;
3282 if (fuid_dirtied)
3283 zfs_fuid_txhold(zfsvfs, tx);
3284
3285 zfs_sa_upgrade_txholds(tx, zp);
3286
3287 err = dmu_tx_assign(tx, TXG_WAIT);
3288 if (err)
3289 goto out;
3290
3291 count = 0;
3292 /*
3293 * Set each attribute requested.
3294 * We group settings according to the locks they need to acquire.
3295 *
3296 * Note: you cannot set ctime directly, although it will be
3297 * updated as a side-effect of calling this function.
3298 */
3299
3300 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
3301 /*
3302 		 * An existing object that was upgraded from an old system has
3303 		 * no slot in its on-disk layout for the project ID attribute.
3304 		 * But the quota accounting logic needs to access the related
3305 		 * slots directly by offset, so we need to adjust the layout of
3306 		 * old objects to put the project ID at a unified, fixed offset.
3307 */
3308 if (attrzp)
3309 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
3310 if (err == 0)
3311 err = sa_add_projid(zp->z_sa_hdl, tx, projid);
3312
3313 if (unlikely(err == EEXIST))
3314 err = 0;
3315 else if (err != 0)
3316 goto out;
3317 else
3318 projid = ZFS_INVALID_PROJID;
3319 }
3320
3321 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3322 mutex_enter(&zp->z_acl_lock);
3323 mutex_enter(&zp->z_lock);
3324
3325 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3326 &zp->z_pflags, sizeof (zp->z_pflags));
3327
3328 if (attrzp) {
3329 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3330 mutex_enter(&attrzp->z_acl_lock);
3331 mutex_enter(&attrzp->z_lock);
3332 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3333 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3334 sizeof (attrzp->z_pflags));
3335 if (projid != ZFS_INVALID_PROJID) {
3336 attrzp->z_projid = projid;
3337 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3338 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
3339 sizeof (attrzp->z_projid));
3340 }
3341 }
3342
3343 if (mask & (ATTR_UID|ATTR_GID)) {
3344
3345 if (mask & ATTR_UID) {
3346 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3347 new_uid = zfs_uid_read(ZTOI(zp));
3348 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3349 &new_uid, sizeof (new_uid));
3350 if (attrzp) {
3351 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3352 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3353 sizeof (new_uid));
3354 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
3355 }
3356 }
3357
3358 if (mask & ATTR_GID) {
3359 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3360 new_gid = zfs_gid_read(ZTOI(zp));
3361 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3362 NULL, &new_gid, sizeof (new_gid));
3363 if (attrzp) {
3364 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3365 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3366 sizeof (new_gid));
3367 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
3368 }
3369 }
3370 if (!(mask & ATTR_MODE)) {
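/*
 * SA_ADD_BULK_ATTR() records only a pointer to new_mode; the value
 * is not read until the later sa_bulk_update(), so assigning
 * new_mode afterwards is deliberate and safe.
 */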
3371 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3372 NULL, &new_mode, sizeof (new_mode));
3373 new_mode = zp->z_mode;
3374 }
3375 err = zfs_acl_chown_setattr(zp);
3376 ASSERT(err == 0);
3377 if (attrzp) {
3378 err = zfs_acl_chown_setattr(attrzp);
3379 ASSERT(err == 0);
3380 }
3381 }
3382
3383 if (mask & ATTR_MODE) {
3384 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3385 &new_mode, sizeof (new_mode));
3386 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
3387 ASSERT3P(aclp, !=, NULL);
3388 err = zfs_aclset_common(zp, aclp, cr, tx);
3389 ASSERT0(err);
3390 if (zp->z_acl_cached)
3391 zfs_acl_free(zp->z_acl_cached);
3392 zp->z_acl_cached = aclp;
3393 aclp = NULL;
3394 }
3395
3396 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3397 zp->z_atime_dirty = 0;
3398 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3399 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3400 &atime, sizeof (atime));
3401 }
3402
3403 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
3404 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3405 ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
3406 ZTOI(zp)->i_sb->s_time_gran);
3407
3408 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3409 mtime, sizeof (mtime));
3410 }
3411
3412 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
3413 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
3414 ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
3415 ZTOI(zp)->i_sb->s_time_gran);
3416 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3417 ctime, sizeof (ctime));
3418 }
3419
3420 if (projid != ZFS_INVALID_PROJID) {
3421 zp->z_projid = projid;
3422 SA_ADD_BULK_ATTR(bulk, count,
3423 SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
3424 sizeof (zp->z_projid));
3425 }
3426
3427 if (attrzp && mask) {
3428 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3429 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
3430 sizeof (ctime));
3431 }
3432
3433 /*
3434 	 * Do this after setting the timestamps to prevent a timestamp
3435 	 * update from toggling the bit.
3436 */
3437
3438 if (xoap && (mask & ATTR_XVATTR)) {
3439
3440 /*
3441 		 * Restore the trimmed-off masks
3442 		 * so that the return masks can be set for the caller.
3443 */
3444
3445 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
3446 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3447 }
3448 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
3449 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3450 }
3451 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
3452 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3453 }
3454 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
3455 XVA_SET_REQ(xvap, XAT_NODUMP);
3456 }
3457 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
3458 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3459 }
3460 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
3461 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3462 }
3463 if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
3464 XVA_SET_REQ(xvap, XAT_PROJINHERIT);
3465 }
3466
3467 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3468 ASSERT(S_ISREG(ip->i_mode));
3469
3470 zfs_xvattr_set(zp, xvap, tx);
3471 }
3472
3473 if (fuid_dirtied)
3474 zfs_fuid_sync(zfsvfs, tx);
3475
3476 if (mask != 0)
3477 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3478
3479 mutex_exit(&zp->z_lock);
3480 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3481 mutex_exit(&zp->z_acl_lock);
3482
3483 if (attrzp) {
3484 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3485 mutex_exit(&attrzp->z_acl_lock);
3486 mutex_exit(&attrzp->z_lock);
3487 }
3488 out:
3489 if (err == 0 && xattr_count > 0) {
3490 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3491 xattr_count, tx);
3492 ASSERT(err2 == 0);
3493 }
3494
3495 if (aclp)
3496 zfs_acl_free(aclp);
3497
3498 if (fuidp) {
3499 zfs_fuid_info_free(fuidp);
3500 fuidp = NULL;
3501 }
3502
3503 if (err) {
3504 dmu_tx_abort(tx);
3505 if (attrzp)
3506 iput(ZTOI(attrzp));
3507 if (err == ERESTART)
3508 goto top;
3509 } else {
3510 if (count > 0)
3511 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3512 dmu_tx_commit(tx);
3513 if (attrzp) {
3514 if (err2 == 0 && handle_eadir)
3515 err2 = zfs_setattr_dir(attrzp);
3516 iput(ZTOI(attrzp));
3517 }
3518 zfs_inode_update(zp);
3519 }
3520
3521 out2:
3522 if (os->os_sync == ZFS_SYNC_ALWAYS)
3523 zil_commit(zilog, 0);
3524
3525 out3:
3526 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
3527 kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
3528 kmem_free(tmpxvattr, sizeof (xvattr_t));
3529 ZFS_EXIT(zfsvfs);
3530 return (err);
3531 }
3532
3533 typedef struct zfs_zlock {
3534 krwlock_t *zl_rwlock; /* lock we acquired */
3535 znode_t *zl_znode; /* znode we held */
3536 struct zfs_zlock *zl_next; /* next in list */
3537 } zfs_zlock_t;
3538
3539 /*
3540 * Drop locks and release vnodes that were held by zfs_rename_lock().
3541 */
3542 static void
3543 zfs_rename_unlock(zfs_zlock_t **zlpp)
3544 {
3545 zfs_zlock_t *zl;
3546
3547 while ((zl = *zlpp) != NULL) {
3548 if (zl->zl_znode != NULL)
3549 zfs_iput_async(ZTOI(zl->zl_znode));
3550 rw_exit(zl->zl_rwlock);
3551 *zlpp = zl->zl_next;
3552 kmem_free(zl, sizeof (*zl));
3553 }
3554 }
3555
3556 /*
3557 * Search back through the directory tree, using the ".." entries.
3558 * Lock each directory in the chain to prevent concurrent renames.
3559 * Fail any attempt to move a directory into one of its own descendants.
3560 * XXX - z_parent_lock can overlap with map or grow locks
3561 */
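/*
 * For example, a request to rename /usr/a/b to /usr/a/b/c/d walks the
 * ".." chain upward from the target directory, encounters szp's
 * object id (b), and fails with EINVAL before anything is modified.
 */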
3562 static int
3563 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3564 {
3565 zfs_zlock_t *zl;
3566 znode_t *zp = tdzp;
3567 uint64_t rootid = ZTOZSB(zp)->z_root;
3568 uint64_t oidp = zp->z_id;
3569 krwlock_t *rwlp = &szp->z_parent_lock;
3570 krw_t rw = RW_WRITER;
3571
3572 /*
3573 * First pass write-locks szp and compares to zp->z_id.
3574 * Later passes read-lock zp and compare to zp->z_parent.
3575 */
3576 do {
3577 if (!rw_tryenter(rwlp, rw)) {
3578 /*
3579 * Another thread is renaming in this path.
3580 * Note that if we are a WRITER, we don't have any
3581 * parent_locks held yet.
3582 */
3583 if (rw == RW_READER && zp->z_id > szp->z_id) {
3584 /*
3585 * Drop our locks and restart
3586 */
3587 zfs_rename_unlock(&zl);
3588 *zlpp = NULL;
3589 zp = tdzp;
3590 oidp = zp->z_id;
3591 rwlp = &szp->z_parent_lock;
3592 rw = RW_WRITER;
3593 continue;
3594 } else {
3595 /*
3596 * Wait for other thread to drop its locks
3597 */
3598 rw_enter(rwlp, rw);
3599 }
3600 }
3601
3602 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3603 zl->zl_rwlock = rwlp;
3604 zl->zl_znode = NULL;
3605 zl->zl_next = *zlpp;
3606 *zlpp = zl;
3607
3608 if (oidp == szp->z_id) /* We're a descendant of szp */
3609 return (SET_ERROR(EINVAL));
3610
3611 if (oidp == rootid) /* We've hit the top */
3612 return (0);
3613
3614 if (rw == RW_READER) { /* i.e. not the first pass */
3615 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3616 if (error)
3617 return (error);
3618 zl->zl_znode = zp;
3619 }
3620 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3621 &oidp, sizeof (oidp));
3622 rwlp = &zp->z_parent_lock;
3623 rw = RW_READER;
3624
3625 } while (zp->z_id != sdzp->z_id);
3626
3627 return (0);
3628 }
3629
3630 /*
3631 * Move an entry from the provided source directory to the target
3632 * directory. Change the entry name as indicated.
3633 *
3634 * IN: sdip - Source directory containing the "old entry".
3635 * snm - Old entry name.
3636 * tdip - Target directory to contain the "new entry".
3637 * tnm - New entry name.
3638 * cr - credentials of caller.
3639 * flags - case flags
3640 *
3641 * RETURN: 0 on success, error code on failure.
3642 *
3643 * Timestamps:
3644 * sdip,tdip - ctime|mtime updated
3645 */
3646 /*ARGSUSED*/
3647 int
3648 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3649 cred_t *cr, int flags)
3650 {
3651 znode_t *tdzp, *szp, *tzp;
3652 znode_t *sdzp = ITOZ(sdip);
3653 zfsvfs_t *zfsvfs = ITOZSB(sdip);
3654 zilog_t *zilog;
3655 zfs_dirlock_t *sdl, *tdl;
3656 dmu_tx_t *tx;
3657 zfs_zlock_t *zl;
3658 int cmp, serr, terr;
3659 int error = 0;
3660 int zflg = 0;
3661 boolean_t waited = B_FALSE;
3662
3663 if (snm == NULL || tnm == NULL)
3664 return (SET_ERROR(EINVAL));
3665
3666 ZFS_ENTER(zfsvfs);
3667 ZFS_VERIFY_ZP(sdzp);
3668 zilog = zfsvfs->z_log;
3669
3670 tdzp = ITOZ(tdip);
3671 ZFS_VERIFY_ZP(tdzp);
3672
3673 /*
3674 * We check i_sb because snapshots and the ctldir must have different
3675 * super blocks.
3676 */
3677 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3678 ZFS_EXIT(zfsvfs);
3679 return (SET_ERROR(EXDEV));
3680 }
3681
3682 if (zfsvfs->z_utf8 && u8_validate(tnm,
3683 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3684 ZFS_EXIT(zfsvfs);
3685 return (SET_ERROR(EILSEQ));
3686 }
3687
3688 if (flags & FIGNORECASE)
3689 zflg |= ZCILOOK;
3690
3691 top:
3692 szp = NULL;
3693 tzp = NULL;
3694 zl = NULL;
3695
3696 /*
3697 * This is to prevent the creation of links into attribute space
3698 	 * by renaming a linked file into/out of an attribute directory.
3699 * See the comment in zfs_link() for why this is considered bad.
3700 */
3701 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3702 ZFS_EXIT(zfsvfs);
3703 return (SET_ERROR(EINVAL));
3704 }
3705
3706 /*
3707 * Lock source and target directory entries. To prevent deadlock,
3708 * a lock ordering must be defined. We lock the directory with
3709 * the smallest object id first, or if it's a tie, the one with
3710 * the lexically first name.
3711 */
3712 if (sdzp->z_id < tdzp->z_id) {
3713 cmp = -1;
3714 } else if (sdzp->z_id > tdzp->z_id) {
3715 cmp = 1;
3716 } else {
3717 /*
3718 * First compare the two name arguments without
3719 * considering any case folding.
3720 */
3721 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3722
3723 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3724 ASSERT(error == 0 || !zfsvfs->z_utf8);
3725 if (cmp == 0) {
3726 /*
3727 * POSIX: "If the old argument and the new argument
3728 * both refer to links to the same existing file,
3729 * the rename() function shall return successfully
3730 * and perform no other action."
3731 */
3732 ZFS_EXIT(zfsvfs);
3733 return (0);
3734 }
3735 /*
3736 * If the file system is case-folding, then we may
3737 * have some more checking to do. A case-folding file
3738 * system is either supporting mixed case sensitivity
3739 * access or is completely case-insensitive. Note
3740 * that the file system is always case preserving.
3741 *
3742 * In mixed sensitivity mode case sensitive behavior
3743 * is the default. FIGNORECASE must be used to
3744 * explicitly request case insensitive behavior.
3745 *
3746 * If the source and target names provided differ only
3747 * by case (e.g., a request to rename 'tim' to 'Tim'),
3748 * we will treat this as a special case in the
3749 * case-insensitive mode: as long as the source name
3750 * is an exact match, we will allow this to proceed as
3751 * a name-change request.
3752 */
3753 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3754 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3755 flags & FIGNORECASE)) &&
3756 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3757 &error) == 0) {
3758 /*
3759 * case preserving rename request, require exact
3760 * name matches
3761 */
3762 zflg |= ZCIEXACT;
3763 zflg &= ~ZCILOOK;
3764 }
3765 }
3766
3767 /*
3768 * If the source and destination directories are the same, we should
3769 * grab the z_name_lock of that directory only once.
3770 */
3771 if (sdzp == tdzp) {
3772 zflg |= ZHAVELOCK;
3773 rw_enter(&sdzp->z_name_lock, RW_READER);
3774 }
3775
3776 if (cmp < 0) {
3777 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3778 ZEXISTS | zflg, NULL, NULL);
3779 terr = zfs_dirent_lock(&tdl,
3780 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3781 } else {
3782 terr = zfs_dirent_lock(&tdl,
3783 tdzp, tnm, &tzp, zflg, NULL, NULL);
3784 serr = zfs_dirent_lock(&sdl,
3785 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3786 NULL, NULL);
3787 }
3788
3789 if (serr) {
3790 /*
3791 * Source entry invalid or not there.
3792 */
3793 if (!terr) {
3794 zfs_dirent_unlock(tdl);
3795 if (tzp)
3796 iput(ZTOI(tzp));
3797 }
3798
3799 if (sdzp == tdzp)
3800 rw_exit(&sdzp->z_name_lock);
3801
3802 if (strcmp(snm, "..") == 0)
3803 serr = EINVAL;
3804 ZFS_EXIT(zfsvfs);
3805 return (serr);
3806 }
3807 if (terr) {
3808 zfs_dirent_unlock(sdl);
3809 iput(ZTOI(szp));
3810
3811 if (sdzp == tdzp)
3812 rw_exit(&sdzp->z_name_lock);
3813
3814 if (strcmp(tnm, "..") == 0)
3815 terr = EINVAL;
3816 ZFS_EXIT(zfsvfs);
3817 return (terr);
3818 }
3819
3820 /*
3821 	 * If we are using project inheritance, meaning the directory has
3822 	 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3823 	 * not only the project ID but also the ZFS_PROJINHERIT flag. In
3824 	 * such a case, we only allow renames into our tree when the project
3825 	 * IDs are the same.
3826 */
3827 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3828 tdzp->z_projid != szp->z_projid) {
3829 error = SET_ERROR(EXDEV);
3830 goto out;
3831 }
3832
3833 /*
3834 * Must have write access at the source to remove the old entry
3835 * and write access at the target to create the new entry.
3836 * Note that if target and source are the same, this can be
3837 * done in a single check.
3838 */
3839
3840 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3841 goto out;
3842
3843 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3844 /*
3845 * Check to make sure rename is valid.
3846 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3847 */
3848 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3849 goto out;
3850 }
3851
3852 /*
3853 * Does target exist?
3854 */
3855 if (tzp) {
3856 /*
3857 * Source and target must be the same type.
3858 */
3859 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3860 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3861 error = SET_ERROR(ENOTDIR);
3862 goto out;
3863 }
3864 } else {
3865 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3866 error = SET_ERROR(EISDIR);
3867 goto out;
3868 }
3869 }
3870 /*
3871 * POSIX dictates that when the source and target
3872 * entries refer to the same file object, rename
3873 * must do nothing and exit without error.
3874 */
3875 if (szp->z_id == tzp->z_id) {
3876 error = 0;
3877 goto out;
3878 }
3879 }
3880
3881 tx = dmu_tx_create(zfsvfs->z_os);
3882 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3883 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3884 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3885 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3886 if (sdzp != tdzp) {
3887 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3888 zfs_sa_upgrade_txholds(tx, tdzp);
3889 }
3890 if (tzp) {
3891 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3892 zfs_sa_upgrade_txholds(tx, tzp);
3893 }
3894
3895 zfs_sa_upgrade_txholds(tx, szp);
3896 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3897 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3898 if (error) {
3899 if (zl != NULL)
3900 zfs_rename_unlock(&zl);
3901 zfs_dirent_unlock(sdl);
3902 zfs_dirent_unlock(tdl);
3903
3904 if (sdzp == tdzp)
3905 rw_exit(&sdzp->z_name_lock);
3906
3907 if (error == ERESTART) {
3908 waited = B_TRUE;
3909 dmu_tx_wait(tx);
3910 dmu_tx_abort(tx);
3911 iput(ZTOI(szp));
3912 if (tzp)
3913 iput(ZTOI(tzp));
3914 goto top;
3915 }
3916 dmu_tx_abort(tx);
3917 iput(ZTOI(szp));
3918 if (tzp)
3919 iput(ZTOI(tzp));
3920 ZFS_EXIT(zfsvfs);
3921 return (error);
3922 }
3923
3924 if (tzp) /* Attempt to remove the existing target */
3925 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3926
3927 if (error == 0) {
3928 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3929 if (error == 0) {
3930 szp->z_pflags |= ZFS_AV_MODIFIED;
3931 if (tdzp->z_pflags & ZFS_PROJINHERIT)
3932 szp->z_pflags |= ZFS_PROJINHERIT;
3933
3934 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3935 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3936 ASSERT0(error);
3937
3938 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3939 if (error == 0) {
3940 zfs_log_rename(zilog, tx, TX_RENAME |
3941 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3942 sdl->dl_name, tdzp, tdl->dl_name, szp);
3943 } else {
3944 /*
3945 * At this point, we have successfully created
3946 * the target name, but have failed to remove
3947 * the source name. Since the create was done
3948 * with the ZRENAMING flag, there are
3949 * complications; for one, the link count is
3950 * wrong. The easiest way to deal with this
3951 * is to remove the newly created target, and
3952 * return the original error. This must
3953 * succeed; fortunately, it is very unlikely to
3954 * fail, since we just created it.
3955 */
3956 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3957 ZRENAMING, NULL), ==, 0);
3958 }
3959 } else {
3960 /*
3961 			 * If we had removed the existing target, the subsequent
3962 			 * call to zfs_link_create() to add back the same entry,
3963 			 * but with the new dnode (szp), should not fail.
3964 */
3965 ASSERT(tzp == NULL);
3966 }
3967 }
3968
3969 dmu_tx_commit(tx);
3970 out:
3971 if (zl != NULL)
3972 zfs_rename_unlock(&zl);
3973
3974 zfs_dirent_unlock(sdl);
3975 zfs_dirent_unlock(tdl);
3976
3977 zfs_inode_update(sdzp);
3978 if (sdzp == tdzp)
3979 rw_exit(&sdzp->z_name_lock);
3980
3981 if (sdzp != tdzp)
3982 zfs_inode_update(tdzp);
3983
3984 zfs_inode_update(szp);
3985 iput(ZTOI(szp));
3986 if (tzp) {
3987 zfs_inode_update(tzp);
3988 iput(ZTOI(tzp));
3989 }
3990
3991 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3992 zil_commit(zilog, 0);
3993
3994 ZFS_EXIT(zfsvfs);
3995 return (error);
3996 }
3997
3998 /*
3999 * Insert the indicated symbolic reference entry into the directory.
4000 *
4001 * IN: dip - Directory to contain new symbolic link.
4002 * link - Name for new symlink entry.
4003 * vap - Attributes of new entry.
4004 * target - Target path of new symlink.
4005 *
4006 * cr - credentials of caller.
4007 * flags - case flags
4008 *
4009 * RETURN: 0 on success, error code on failure.
4010 *
4011 * Timestamps:
4012 * dip - ctime|mtime updated
4013 */
4014 /*ARGSUSED*/
4015 int
4016 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
4017 struct inode **ipp, cred_t *cr, int flags)
4018 {
4019 znode_t *zp, *dzp = ITOZ(dip);
4020 zfs_dirlock_t *dl;
4021 dmu_tx_t *tx;
4022 zfsvfs_t *zfsvfs = ITOZSB(dip);
4023 zilog_t *zilog;
4024 uint64_t len = strlen(link);
4025 int error;
4026 int zflg = ZNEW;
4027 zfs_acl_ids_t acl_ids;
4028 boolean_t fuid_dirtied;
4029 uint64_t txtype = TX_SYMLINK;
4030 boolean_t waited = B_FALSE;
4031
4032 ASSERT(S_ISLNK(vap->va_mode));
4033
4034 if (name == NULL)
4035 return (SET_ERROR(EINVAL));
4036
4037 ZFS_ENTER(zfsvfs);
4038 ZFS_VERIFY_ZP(dzp);
4039 zilog = zfsvfs->z_log;
4040
4041 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4042 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4043 ZFS_EXIT(zfsvfs);
4044 return (SET_ERROR(EILSEQ));
4045 }
4046 if (flags & FIGNORECASE)
4047 zflg |= ZCILOOK;
4048
4049 if (len > MAXPATHLEN) {
4050 ZFS_EXIT(zfsvfs);
4051 return (SET_ERROR(ENAMETOOLONG));
4052 }
4053
4054 if ((error = zfs_acl_ids_create(dzp, 0,
4055 vap, cr, NULL, &acl_ids)) != 0) {
4056 ZFS_EXIT(zfsvfs);
4057 return (error);
4058 }
4059 top:
4060 *ipp = NULL;
4061
4062 /*
4063 * Attempt to lock directory; fail if entry already exists.
4064 */
4065 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
4066 if (error) {
4067 zfs_acl_ids_free(&acl_ids);
4068 ZFS_EXIT(zfsvfs);
4069 return (error);
4070 }
4071
4072 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4073 zfs_acl_ids_free(&acl_ids);
4074 zfs_dirent_unlock(dl);
4075 ZFS_EXIT(zfsvfs);
4076 return (error);
4077 }
4078
4079 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
4080 zfs_acl_ids_free(&acl_ids);
4081 zfs_dirent_unlock(dl);
4082 ZFS_EXIT(zfsvfs);
4083 return (SET_ERROR(EDQUOT));
4084 }
4085 tx = dmu_tx_create(zfsvfs->z_os);
4086 fuid_dirtied = zfsvfs->z_fuid_dirty;
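/*
 * Reserve space for both possible layouts of the link target: older
 * ZPL versions store it in the object's data (the write hold below),
 * while version 4 datasets store it as an SA attribute (the SA-create
 * hold sized with len).
 */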
4087 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4088 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4089 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4090 ZFS_SA_BASE_ATTR_SIZE + len);
4091 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4092 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4093 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4094 acl_ids.z_aclp->z_acl_bytes);
4095 }
4096 if (fuid_dirtied)
4097 zfs_fuid_txhold(zfsvfs, tx);
4098 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4099 if (error) {
4100 zfs_dirent_unlock(dl);
4101 if (error == ERESTART) {
4102 waited = B_TRUE;
4103 dmu_tx_wait(tx);
4104 dmu_tx_abort(tx);
4105 goto top;
4106 }
4107 zfs_acl_ids_free(&acl_ids);
4108 dmu_tx_abort(tx);
4109 ZFS_EXIT(zfsvfs);
4110 return (error);
4111 }
4112
4113 /*
4114 * Create a new object for the symlink.
4115 * For version 4 ZPL datasets the symlink will be an SA attribute.
4116 */
4117 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4118
4119 if (fuid_dirtied)
4120 zfs_fuid_sync(zfsvfs, tx);
4121
4122 mutex_enter(&zp->z_lock);
4123 if (zp->z_is_sa)
4124 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4125 link, len, tx);
4126 else
4127 zfs_sa_symlink(zp, link, len, tx);
4128 mutex_exit(&zp->z_lock);
4129
4130 zp->z_size = len;
4131 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4132 &zp->z_size, sizeof (zp->z_size), tx);
4133 /*
4134 * Insert the new object into the directory.
4135 */
4136 error = zfs_link_create(dl, zp, tx, ZNEW);
4137 if (error != 0) {
4138 zfs_znode_delete(zp, tx);
4139 remove_inode_hash(ZTOI(zp));
4140 } else {
4141 if (flags & FIGNORECASE)
4142 txtype |= TX_CI;
4143 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4144
4145 zfs_inode_update(dzp);
4146 zfs_inode_update(zp);
4147 }
4148
4149 zfs_acl_ids_free(&acl_ids);
4150
4151 dmu_tx_commit(tx);
4152
4153 zfs_dirent_unlock(dl);
4154
4155 if (error == 0) {
4156 *ipp = ZTOI(zp);
4157
4158 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4159 zil_commit(zilog, 0);
4160 } else {
4161 iput(ZTOI(zp));
4162 }
4163
4164 ZFS_EXIT(zfsvfs);
4165 return (error);
4166 }
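/*
 * Illustrative sketch (hypothetical paths): how the checks above look
 * from user space through symlink(2).  A target longer than the path
 * limit fails with ENAMETOOLONG, matching the MAXPATHLEN test above.
 */
#if 0
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char target[PATH_MAX + 2];

	/* Build a target path one byte longer than the limit. */
	memset(target, 'a', sizeof (target) - 1);
	target[sizeof (target) - 1] = '\0';

	if (symlink(target, "/tank/fs/too-long") == -1)
		printf("symlink: %s\n", strerror(errno)); /* ENAMETOOLONG */

	if (symlink("/etc/hostname", "/tank/fs/ok") == 0)
		printf("symlink created\n");

	return (0);
}
#endif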
4167
4168 /*
4169 * Return, in the buffer contained in the provided uio structure,
4170 * the symbolic path referred to by ip.
4171 *
4172 * IN: ip - inode of symbolic link
4173 * uio - structure to contain the link path.
4174 * cr - credentials of caller.
4175 *
4176 * RETURN: 0 if success
4177 * error code if failure
4178 *
4179 * Timestamps:
4180 * ip - atime updated
4181 */
4182 /* ARGSUSED */
4183 int
4184 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
4185 {
4186 znode_t *zp = ITOZ(ip);
4187 zfsvfs_t *zfsvfs = ITOZSB(ip);
4188 int error;
4189
4190 ZFS_ENTER(zfsvfs);
4191 ZFS_VERIFY_ZP(zp);
4192
4193 mutex_enter(&zp->z_lock);
4194 if (zp->z_is_sa)
4195 error = sa_lookup_uio(zp->z_sa_hdl,
4196 SA_ZPL_SYMLINK(zfsvfs), uio);
4197 else
4198 error = zfs_sa_readlink(zp, uio);
4199 mutex_exit(&zp->z_lock);
4200
4201 ZFS_EXIT(zfsvfs);
4202 return (error);
4203 }
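/*
 * Illustrative sketch (hypothetical path): reading the target back
 * with readlink(2).  The call does not NUL-terminate the buffer, so
 * the returned length must be used explicitly.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[4096];
	ssize_t len;

	len = readlink("/tank/fs/ok", buf, sizeof (buf) - 1);
	if (len == -1) {
		perror("readlink");
		return (1);
	}
	buf[len] = '\0';
	printf("target: %s\n", buf);
	return (0);
}
#endif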
4204
4205 /*
4206 * Insert a new entry into directory tdip referencing sip.
4207 *
4208 * IN: tdip - Directory to contain new entry.
4209 * sip - inode of new entry.
4210 * name - name of new entry.
4211 * cr - credentials of caller.
4212 *
4213 * RETURN: 0 if success
4214 * error code if failure
4215 *
4216 * Timestamps:
4217 * tdip - ctime|mtime updated
4218 * sip - ctime updated
4219 */
4220 /* ARGSUSED */
4221 int
4222 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
4223 int flags)
4224 {
4225 znode_t *dzp = ITOZ(tdip);
4226 znode_t *tzp, *szp;
4227 zfsvfs_t *zfsvfs = ITOZSB(tdip);
4228 zilog_t *zilog;
4229 zfs_dirlock_t *dl;
4230 dmu_tx_t *tx;
4231 int error;
4232 int zf = ZNEW;
4233 uint64_t parent;
4234 uid_t owner;
4235 boolean_t waited = B_FALSE;
4236 boolean_t is_tmpfile = 0;
4237 uint64_t txg;
4238 #ifdef HAVE_TMPFILE
4239 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
4240 #endif
4241 ASSERT(S_ISDIR(tdip->i_mode));
4242
4243 if (name == NULL)
4244 return (SET_ERROR(EINVAL));
4245
4246 ZFS_ENTER(zfsvfs);
4247 ZFS_VERIFY_ZP(dzp);
4248 zilog = zfsvfs->z_log;
4249
4250 /*
4251 * POSIX dictates that we return EPERM here.
4252 * Better choices include ENOTSUP or EISDIR.
4253 */
4254 if (S_ISDIR(sip->i_mode)) {
4255 ZFS_EXIT(zfsvfs);
4256 return (SET_ERROR(EPERM));
4257 }
4258
4259 szp = ITOZ(sip);
4260 ZFS_VERIFY_ZP(szp);
4261
4262 /*
4263 * If we are using project inheritance, meaning the directory has
4264 * ZFS_PROJINHERIT set, then its descendant directories will inherit
4265 * not only the project ID but also the ZFS_PROJINHERIT flag. In
4266 * that case, we only allow hard link creation in our tree when the
4267 * project IDs are the same.
4268 */
4269 if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
4270 ZFS_EXIT(zfsvfs);
4271 return (SET_ERROR(EXDEV));
4272 }
4273
4274 /*
4275 * We check i_sb because snapshots and the ctldir must have different
4276 * super blocks.
4277 */
4278 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
4279 ZFS_EXIT(zfsvfs);
4280 return (SET_ERROR(EXDEV));
4281 }
4282
4283 /* Prevent links to .zfs/shares files */
4284
4285 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4286 &parent, sizeof (uint64_t))) != 0) {
4287 ZFS_EXIT(zfsvfs);
4288 return (error);
4289 }
4290 if (parent == zfsvfs->z_shares_dir) {
4291 ZFS_EXIT(zfsvfs);
4292 return (SET_ERROR(EPERM));
4293 }
4294
4295 if (zfsvfs->z_utf8 && u8_validate(name,
4296 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4297 ZFS_EXIT(zfsvfs);
4298 return (SET_ERROR(EILSEQ));
4299 }
4300 if (flags & FIGNORECASE)
4301 zf |= ZCILOOK;
4302
4303 /*
4304 * We do not support links between attributes and non-attributes
4305 * because of the potential security risk of creating links
4306 * into "normal" file space in order to circumvent restrictions
4307 * imposed in attribute space.
4308 */
4309 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4310 ZFS_EXIT(zfsvfs);
4311 return (SET_ERROR(EINVAL));
4312 }
4313
4314 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4315 cr, ZFS_OWNER);
4316 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4317 ZFS_EXIT(zfsvfs);
4318 return (SET_ERROR(EPERM));
4319 }
4320
4321 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4322 ZFS_EXIT(zfsvfs);
4323 return (error);
4324 }
4325
4326 top:
4327 /*
4328 * Attempt to lock directory; fail if entry already exists.
4329 */
4330 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4331 if (error) {
4332 ZFS_EXIT(zfsvfs);
4333 return (error);
4334 }
4335
4336 tx = dmu_tx_create(zfsvfs->z_os);
4337 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4338 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4339 if (is_tmpfile)
4340 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4341
4342 zfs_sa_upgrade_txholds(tx, szp);
4343 zfs_sa_upgrade_txholds(tx, dzp);
4344 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4345 if (error) {
4346 zfs_dirent_unlock(dl);
4347 if (error == ERESTART) {
4348 waited = B_TRUE;
4349 dmu_tx_wait(tx);
4350 dmu_tx_abort(tx);
4351 goto top;
4352 }
4353 dmu_tx_abort(tx);
4354 ZFS_EXIT(zfsvfs);
4355 return (error);
4356 }
4357 /* unmark z_unlinked so zfs_link_create() will not reject it */
4358 if (is_tmpfile)
4359 szp->z_unlinked = 0;
4360 error = zfs_link_create(dl, szp, tx, 0);
4361
4362 if (error == 0) {
4363 uint64_t txtype = TX_LINK;
4364 /*
4365 * A tmpfile is created in z_unlinkedobj, so remove it from there.
4366 * Also, we don't log to the ZIL, because all previous file
4367 * operations on the tmpfile are ignored by the ZIL. Instead we
4368 * always wait for the txg to sync to make sure all previous
4369 * operations are sync safe.
4370 */
4371 if (is_tmpfile) {
4372 VERIFY(zap_remove_int(zfsvfs->z_os,
4373 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
4374 } else {
4375 if (flags & FIGNORECASE)
4376 txtype |= TX_CI;
4377 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4378 }
4379 } else if (is_tmpfile) {
4380 /* restore z_unlinked since linking failed */
4381 szp->z_unlinked = 1;
4382 }
4383 txg = dmu_tx_get_txg(tx);
4384 dmu_tx_commit(tx);
4385
4386 zfs_dirent_unlock(dl);
4387
4388 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4389 zil_commit(zilog, 0);
4390
4391 if (is_tmpfile)
4392 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
4393
4394 zfs_inode_update(dzp);
4395 zfs_inode_update(szp);
4396 ZFS_EXIT(zfsvfs);
4397 return (error);
4398 }
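/*
 * Illustrative sketch (hypothetical paths, /tank/a/sub assumed to be
 * a separate dataset): the two error cases above as seen from user
 * space.  Hard linking a directory fails with EPERM; linking across
 * super blocks fails with EXDEV.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	if (link("/tank/a/dir", "/tank/a/dirlink") == -1)
		printf("link(dir): %s\n", strerror(errno));	/* EPERM */

	if (link("/tank/a/file", "/tank/a/sub/cross") == -1)
		printf("link(cross): %s\n", strerror(errno));	/* EXDEV */

	return (0);
}
#endif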
4399
4400 static void
4401 zfs_putpage_commit_cb(void *arg)
4402 {
4403 struct page *pp = arg;
4404
4405 ClearPageError(pp);
4406 end_page_writeback(pp);
4407 }
4408
4409 /*
4410 * Push a page out to disk, once the page is on stable storage the
4411 * registered commit callback will be run as notification of completion.
4412 *
4413 * IN: ip - inode of the page's mapping.
4414 * pp - page to push (page is locked)
4415 * wbc - writeback control data
4416 *
4417 * RETURN: 0 if success
4418 * error code if failure
4419 *
4420 * Timestamps:
4421 * ip - ctime|mtime updated
4422 */
4423 /* ARGSUSED */
4424 int
4425 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
4426 {
4427 znode_t *zp = ITOZ(ip);
4428 zfsvfs_t *zfsvfs = ITOZSB(ip);
4429 loff_t offset;
4430 loff_t pgoff;
4431 unsigned int pglen;
4432 dmu_tx_t *tx;
4433 caddr_t va;
4434 int err = 0;
4435 uint64_t mtime[2], ctime[2];
4436 sa_bulk_attr_t bulk[3];
4437 int cnt = 0;
4438 struct address_space *mapping;
4439
4440 ZFS_ENTER(zfsvfs);
4441 ZFS_VERIFY_ZP(zp);
4442
4443 ASSERT(PageLocked(pp));
4444
4445 pgoff = page_offset(pp); /* Page byte-offset in file */
4446 offset = i_size_read(ip); /* File length in bytes */
4447 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4448 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
4449
4450 /* Page is beyond end of file */
4451 if (pgoff >= offset) {
4452 unlock_page(pp);
4453 ZFS_EXIT(zfsvfs);
4454 return (0);
4455 }
4456
4457 /* Truncate page length to end of file */
4458 if (pgoff + pglen > offset)
4459 pglen = offset - pgoff;
4460
4461 #if 0
4462 /*
4463 * FIXME: Allow mmap writes past its quota. The correct fix
4464 * is to register a page_mkwrite() handler to count the page
4465 * against its quota when it is about to be dirtied.
4466 */
4467 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
4468 KUID_TO_SUID(ip->i_uid)) ||
4469 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
4470 KGID_TO_SGID(ip->i_gid)) ||
4471 (zp->z_projid != ZFS_DEFAULT_PROJID &&
4472 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
4473 zp->z_projid))) {
4474 err = EDQUOT;
4475 }
4476 #endif
4477
4478 /*
4479 * The ordering here is critical and must adhere to the following
4480 * rules in order to avoid deadlocking in either zfs_read() or
4481 * zfs_free_range() due to a lock inversion.
4482 *
4483 * 1) The page must be unlocked prior to acquiring the range lock.
4484 * This is critical because zfs_read() calls find_lock_page()
4485 * which may block on the page lock while holding the range lock.
4486 *
4487 * 2) Before setting or clearing write back on a page the range lock
4488 * must be held in order to prevent a lock inversion with the
4489 * zfs_free_range() function.
4490 *
4491 * This presents a problem because upon entering this function the
4492 * page lock is already held. To safely acquire the range lock the
4493 * page lock must be dropped. This creates a window where another
4494 * process could truncate, invalidate, dirty, or write out the page.
4495 *
4496 * Therefore, after successfully reacquiring the range and page locks
4497 * the current page state is checked. In the common case everything
4498 * will be as is expected and it can be written out. However, if
4499 * the page state has changed it must be handled accordingly.
4500 */
4501 mapping = pp->mapping;
4502 redirty_page_for_writepage(wbc, pp);
4503 unlock_page(pp);
4504
4505 locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
4506 pgoff, pglen, RL_WRITER);
4507 lock_page(pp);
4508
4509 /* Page mapping changed or it was no longer dirty, we're done */
4510 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4511 unlock_page(pp);
4512 rangelock_exit(lr);
4513 ZFS_EXIT(zfsvfs);
4514 return (0);
4515 }
4516
4517 /* Another process already started writeback on this page; block if required */
4518 if (PageWriteback(pp)) {
4519 unlock_page(pp);
4520 rangelock_exit(lr);
4521
4522 if (wbc->sync_mode != WB_SYNC_NONE)
4523 wait_on_page_writeback(pp);
4524
4525 ZFS_EXIT(zfsvfs);
4526 return (0);
4527 }
4528
4529 /* Clear the dirty flag while the required locks are held */
4530 if (!clear_page_dirty_for_io(pp)) {
4531 unlock_page(pp);
4532 rangelock_exit(lr);
4533 ZFS_EXIT(zfsvfs);
4534 return (0);
4535 }
4536
4537 /*
4538 * Counterpart for redirty_page_for_writepage() above. This page
4539 * was in fact not skipped and should not be counted as if it were.
4540 */
4541 wbc->pages_skipped--;
4542 set_page_writeback(pp);
4543 unlock_page(pp);
4544
4545 tx = dmu_tx_create(zfsvfs->z_os);
4546 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
4547 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4548 zfs_sa_upgrade_txholds(tx, zp);
4549
4550 err = dmu_tx_assign(tx, TXG_NOWAIT);
4551 if (err != 0) {
4552 if (err == ERESTART)
4553 dmu_tx_wait(tx);
4554
4555 dmu_tx_abort(tx);
4556 __set_page_dirty_nobuffers(pp);
4557 ClearPageError(pp);
4558 end_page_writeback(pp);
4559 rangelock_exit(lr);
4560 ZFS_EXIT(zfsvfs);
4561 return (err);
4562 }
4563
4564 va = kmap(pp);
4565 ASSERT3U(pglen, <=, PAGE_SIZE);
4566 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
4567 kunmap(pp);
4568
4569 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4570 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4571 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4572 &zp->z_pflags, 8);
4573
4574 /* Preserve the mtime and ctime provided by the inode */
4575 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4576 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4577 zp->z_atime_dirty = 0;
4578 zp->z_seq++;
4579
4580 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4581
4582 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
4583 zfs_putpage_commit_cb, pp);
4584 dmu_tx_commit(tx);
4585
4586 rangelock_exit(lr);
4587
4588 if (wbc->sync_mode != WB_SYNC_NONE) {
4589 /*
4590 * Note that this is rarely called under writepages(), because
4591 * writepages() normally handles the entire commit for
4592 * performance reasons.
4593 */
4594 zil_commit(zfsvfs->z_log, zp->z_id);
4595 }
4596
4597 ZFS_EXIT(zfsvfs);
4598 return (err);
4599 }
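/*
 * Illustrative sketch (hypothetical path): one common way this entry
 * point is driven from user space, a dirtied shared mapping flushed
 * with msync(MS_SYNC), which pushes the page through ->writepage.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/data", O_RDWR | O_CREAT, 0644);
	char *p;

	if (fd == -1 || ftruncate(fd, 4096) == -1)
		return (1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);

	memset(p, 'x', 4096);		/* dirty the page */
	msync(p, 4096, MS_SYNC);	/* write it back synchronously */

	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif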
4600
4601 /*
4602 * Update the system attributes when the inode has been dirtied. For the
4603 * moment we only update the mode, atime, mtime, and ctime.
4604 */
4605 int
4606 zfs_dirty_inode(struct inode *ip, int flags)
4607 {
4608 znode_t *zp = ITOZ(ip);
4609 zfsvfs_t *zfsvfs = ITOZSB(ip);
4610 dmu_tx_t *tx;
4611 uint64_t mode, atime[2], mtime[2], ctime[2];
4612 sa_bulk_attr_t bulk[4];
4613 int error = 0;
4614 int cnt = 0;
4615
4616 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
4617 return (0);
4618
4619 ZFS_ENTER(zfsvfs);
4620 ZFS_VERIFY_ZP(zp);
4621
4622 #ifdef I_DIRTY_TIME
4623 /*
4624 * This is the lazytime semantic introduced in Linux 4.0.
4625 * This flag is only passed in from update_time() when lazytime is set.
4626 * (Note, I_DIRTY_SYNC will also be set if lazytime is not enabled.)
4627 * Fortunately mtime and ctime are managed within ZFS itself, so we
4628 * only need to dirty atime.
4629 */
4630 if (flags == I_DIRTY_TIME) {
4631 zp->z_atime_dirty = 1;
4632 goto out;
4633 }
4634 #endif
4635
4636 tx = dmu_tx_create(zfsvfs->z_os);
4637
4638 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4639 zfs_sa_upgrade_txholds(tx, zp);
4640
4641 error = dmu_tx_assign(tx, TXG_WAIT);
4642 if (error) {
4643 dmu_tx_abort(tx);
4644 goto out;
4645 }
4646
4647 mutex_enter(&zp->z_lock);
4648 zp->z_atime_dirty = 0;
4649
4650 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4651 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4652 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4653 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4654
4655 /* Preserve the mode, mtime and ctime provided by the inode */
4656 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4657 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4658 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4659 mode = ip->i_mode;
4660
4661 zp->z_mode = mode;
4662
4663 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4664 mutex_exit(&zp->z_lock);
4665
4666 dmu_tx_commit(tx);
4667 out:
4668 ZFS_EXIT(zfsvfs);
4669 return (error);
4670 }
4671
4672 /*ARGSUSED*/
4673 void
4674 zfs_inactive(struct inode *ip)
4675 {
4676 znode_t *zp = ITOZ(ip);
4677 zfsvfs_t *zfsvfs = ITOZSB(ip);
4678 uint64_t atime[2];
4679 int error;
4680 int need_unlock = 0;
4681
4682 /* Only read lock if we haven't already write locked, e.g. rollback */
4683 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
4684 need_unlock = 1;
4685 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4686 }
4687 if (zp->z_sa_hdl == NULL) {
4688 if (need_unlock)
4689 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4690 return;
4691 }
4692
4693 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4694 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4695
4696 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4697 zfs_sa_upgrade_txholds(tx, zp);
4698 error = dmu_tx_assign(tx, TXG_WAIT);
4699 if (error) {
4700 dmu_tx_abort(tx);
4701 } else {
4702 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4703 mutex_enter(&zp->z_lock);
4704 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4705 (void *)&atime, sizeof (atime), tx);
4706 zp->z_atime_dirty = 0;
4707 mutex_exit(&zp->z_lock);
4708 dmu_tx_commit(tx);
4709 }
4710 }
4711
4712 zfs_zinactive(zp);
4713 if (need_unlock)
4714 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4715 }
4716
4717 /*
4718 * Bounds-check the seek operation.
4719 *
4720 * IN: ip - inode seeking within
4721 * ooff - old file offset
4722 * noffp - pointer to new file offset
4723 * ct - caller context
4724 *
4725 * RETURN: 0 if success
4726 * EINVAL if new offset invalid
4727 */
4728 /* ARGSUSED */
4729 int
4730 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4731 {
4732 if (S_ISDIR(ip->i_mode))
4733 return (0);
4734 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4735 }
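/*
 * Illustrative sketch (hypothetical path): the same bound observed
 * through lseek(2), which rejects a negative resulting offset with
 * EINVAL, consistent with the check above.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/data", O_RDONLY);

	if (fd == -1)
		return (1);
	if (lseek(fd, -1, SEEK_SET) == -1)
		printf("lseek: %s\n", strerror(errno));	/* EINVAL */
	close(fd);
	return (0);
}
#endif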
4736
4737 /*
4738 * Fill pages with data from the disk.
4739 */
4740 static int
4741 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4742 {
4743 znode_t *zp = ITOZ(ip);
4744 zfsvfs_t *zfsvfs = ITOZSB(ip);
4745 objset_t *os;
4746 struct page *cur_pp;
4747 u_offset_t io_off, total;
4748 size_t io_len;
4749 loff_t i_size;
4750 unsigned page_idx;
4751 int err;
4752
4753 os = zfsvfs->z_os;
4754 io_len = nr_pages << PAGE_SHIFT;
4755 i_size = i_size_read(ip);
4756 io_off = page_offset(pl[0]);
4757
4758 if (io_off + io_len > i_size)
4759 io_len = i_size - io_off;
4760
4761 /*
4762 * Iterate over list of pages and read each page individually.
4763 */
4764 page_idx = 0;
4765 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4766 caddr_t va;
4767
4768 cur_pp = pl[page_idx++];
4769 va = kmap(cur_pp);
4770 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4771 DMU_READ_PREFETCH);
4772 kunmap(cur_pp);
4773 if (err) {
4774 /* convert checksum errors into IO errors */
4775 if (err == ECKSUM)
4776 err = SET_ERROR(EIO);
4777 return (err);
4778 }
4779 }
4780
4781 return (0);
4782 }
4783
4784 /*
4785 * Uses zfs_fillpage to read data from the file and fill the pages.
4786 *
4787 * IN: ip - inode of file to get data from.
4788 * pl - list of pages to read
4789 * nr_pages - number of pages to read
4790 *
4791 * RETURN: 0 on success, error code on failure.
4792 *
4793 * Timestamps:
4794 * ip - atime updated
4795 */
4796 /* ARGSUSED */
4797 int
4798 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4799 {
4800 znode_t *zp = ITOZ(ip);
4801 zfsvfs_t *zfsvfs = ITOZSB(ip);
4802 int err;
4803
4804 if (pl == NULL)
4805 return (0);
4806
4807 ZFS_ENTER(zfsvfs);
4808 ZFS_VERIFY_ZP(zp);
4809
4810 err = zfs_fillpage(ip, pl, nr_pages);
4811
4812 ZFS_EXIT(zfsvfs);
4813 return (err);
4814 }
4815
4816 /*
4817 * Check ZFS specific permissions to memory map a section of a file.
4818 *
4819 * IN: ip - inode of the file to mmap
4820 * off - file offset
4821 * addrp - start address in memory region
4822 * len - length of memory region
4823 * vm_flags - address flags
4824 *
4825 * RETURN: 0 if success
4826 * error code if failure
4827 */
4828 /*ARGSUSED*/
4829 int
4830 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4831 unsigned long vm_flags)
4832 {
4833 znode_t *zp = ITOZ(ip);
4834 zfsvfs_t *zfsvfs = ITOZSB(ip);
4835
4836 ZFS_ENTER(zfsvfs);
4837 ZFS_VERIFY_ZP(zp);
4838
4839 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4840 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4841 ZFS_EXIT(zfsvfs);
4842 return (SET_ERROR(EPERM));
4843 }
4844
4845 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4846 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4847 ZFS_EXIT(zfsvfs);
4848 return (SET_ERROR(EACCES));
4849 }
4850
4851 if (off < 0 || len > MAXOFFSET_T - off) {
4852 ZFS_EXIT(zfsvfs);
4853 return (SET_ERROR(ENXIO));
4854 }
4855
4856 ZFS_EXIT(zfsvfs);
4857 return (0);
4858 }
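/*
 * Illustrative sketch (hypothetical path): the ZFS_APPENDONLY case of
 * the pflags check above.  An append-only file may be opened for
 * writing with O_APPEND, but mapping it writable and shared is
 * rejected with EPERM.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/log", O_RDWR | O_APPEND);
	void *p;

	if (fd == -1)
		return (1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap");				/* EPERM */
	else
		munmap(p, 4096);
	close(fd);
	return (0);
}
#endif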
4859
4860 /*
4861 * convoff - converts the given lock range (l_start, l_whence) to be
4862 * relative to the given whence.
4863 */
4864 int
4865 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4866 {
4867 vattr_t vap;
4868 int error;
4869
4870 if ((lckdat->l_whence == SEEK_END) || (whence == SEEK_END)) {
4871 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
4872 return (error);
4873 }
4874
4875 switch (lckdat->l_whence) {
4876 case SEEK_CUR:
4877 lckdat->l_start += offset;
4878 break;
4879 case SEEK_END:
4880 lckdat->l_start += vap.va_size;
4881 /* FALLTHRU */
4882 case SEEK_SET:
4883 break;
4884 default:
4885 return (SET_ERROR(EINVAL));
4886 }
4887
4888 if (lckdat->l_start < 0)
4889 return (SET_ERROR(EINVAL));
4890
4891 switch (whence) {
4892 case SEEK_CUR:
4893 lckdat->l_start -= offset;
4894 break;
4895 case SEEK_END:
4896 lckdat->l_start -= vap.va_size;
4897 /* FALLTHRU */
4898 case SEEK_SET:
4899 break;
4900 default:
4901 return (SET_ERROR(EINVAL));
4902 }
4903
4904 lckdat->l_whence = (short)whence;
4905 return (0);
4906 }
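/*
 * Worked example, reusing the names above.  With a current file offset
 * of 50, a range given as (l_whence = SEEK_CUR, l_start = 100) is
 * first normalized to the absolute position 150 (which must be
 * non-negative), then rebased on the requested whence; a SEEK_END
 * result may legitimately be negative, meaning "before end of file".
 */
#if 0
	flock64_t l = { 0 };

	l.l_whence = SEEK_CUR;
	l.l_start = 100;
	error = convoff(ip, &l, SEEK_SET, 50);
	/* on success: l.l_start == 150, l.l_whence == SEEK_SET */
#endif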
4907
4908 /*
4909 * Free or allocate space in a file. Currently, this function only
4910 * supports the `F_FREESP' command. However, this command is somewhat
4911 * misnamed, as its functionality includes the ability to allocate as
4912 * well as free space.
4913 *
4914 * IN: ip - inode of file to free data in.
4915 * cmd - action to take (only F_FREESP supported).
4916 * bfp - section of file to free/alloc.
4917 * flag - current file open mode flags.
4918 * offset - current file offset.
4919 * cr - credentials of caller.
4920 *
4921 * RETURN: 0 on success, error code on failure.
4922 *
4923 * Timestamps:
4924 * ip - ctime|mtime updated
4925 */
4926 /* ARGSUSED */
4927 int
4928 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4929 offset_t offset, cred_t *cr)
4930 {
4931 znode_t *zp = ITOZ(ip);
4932 zfsvfs_t *zfsvfs = ITOZSB(ip);
4933 uint64_t off, len;
4934 int error;
4935
4936 ZFS_ENTER(zfsvfs);
4937 ZFS_VERIFY_ZP(zp);
4938
4939 if (cmd != F_FREESP) {
4940 ZFS_EXIT(zfsvfs);
4941 return (SET_ERROR(EINVAL));
4942 }
4943
4944 /*
4945 * Callers might not be able to detect properly that we are read-only,
4946 * so check it explicitly here.
4947 */
4948 if (zfs_is_readonly(zfsvfs)) {
4949 ZFS_EXIT(zfsvfs);
4950 return (SET_ERROR(EROFS));
4951 }
4952
4953 if ((error = convoff(ip, bfp, SEEK_SET, offset))) {
4954 ZFS_EXIT(zfsvfs);
4955 return (error);
4956 }
4957
4958 if (bfp->l_len < 0) {
4959 ZFS_EXIT(zfsvfs);
4960 return (SET_ERROR(EINVAL));
4961 }
4962
4963 /*
4964 * Permissions aren't checked on Solaris because on this OS
4965 * zfs_space() can only be called with an opened file handle.
4966 * On Linux we can get here through truncate_range() which
4967 * operates directly on inodes, so we need to check access rights.
4968 */
4969 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4970 ZFS_EXIT(zfsvfs);
4971 return (error);
4972 }
4973
4974 off = bfp->l_start;
4975 len = bfp->l_len; /* 0 means from off to end of file */
4976
4977 error = zfs_freesp(zp, off, len, flag, TRUE);
4978
4979 ZFS_EXIT(zfsvfs);
4980 return (error);
4981 }
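/*
 * Illustrative sketch (hypothetical path): on Linux this path is
 * normally reached through fallocate(2) hole punching, where the
 * (offset, len) pair maps onto bfp->l_start and bfp->l_len, and
 * l_len == 0 would mean "from l_start to end of file".
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/data", O_RDWR);

	if (fd == -1)
		return (1);
	/* Free the first 1 MiB without changing the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	    0, 1 << 20) == -1)
		perror("fallocate");
	close(fd);
	return (0);
}
#endif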
4982
4983 /*ARGSUSED*/
4984 int
4985 zfs_fid(struct inode *ip, fid_t *fidp)
4986 {
4987 znode_t *zp = ITOZ(ip);
4988 zfsvfs_t *zfsvfs = ITOZSB(ip);
4989 uint32_t gen;
4990 uint64_t gen64;
4991 uint64_t object = zp->z_id;
4992 zfid_short_t *zfid;
4993 int size, i, error;
4994
4995 ZFS_ENTER(zfsvfs);
4996 ZFS_VERIFY_ZP(zp);
4997
4998 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4999 &gen64, sizeof (uint64_t))) != 0) {
5000 ZFS_EXIT(zfsvfs);
5001 return (error);
5002 }
5003
5004 gen = (uint32_t)gen64;
5005
5006 size = SHORT_FID_LEN;
5007
5008 zfid = (zfid_short_t *)fidp;
5009
5010 zfid->zf_len = size;
5011
5012 for (i = 0; i < sizeof (zfid->zf_object); i++)
5013 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
5014
5015 /* Must have a non-zero generation number to distinguish from .zfs */
5016 if (gen == 0)
5017 gen = 1;
5018 for (i = 0; i < sizeof (zfid->zf_gen); i++)
5019 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
5020
5021 ZFS_EXIT(zfsvfs);
5022 return (0);
5023 }
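/*
 * Sketch of the byte packing above: the object number and generation
 * are serialized least significant byte first, making the file ID
 * encoding independent of host endianness.  A standalone equivalent
 * of the two loops:
 */
#if 0
#include <stdint.h>

static void
zfid_pack(uint8_t *dst, uint64_t val, int nbytes)
{
	int i;

	for (i = 0; i < nbytes; i++)
		dst[i] = (uint8_t)(val >> (8 * i));
}
/* zfid_pack(zfid->zf_object, object, sizeof (zfid->zf_object)); */
#endif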
5024
5025 /*ARGSUSED*/
5026 int
5027 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5028 {
5029 znode_t *zp = ITOZ(ip);
5030 zfsvfs_t *zfsvfs = ITOZSB(ip);
5031 int error;
5032 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5033
5034 ZFS_ENTER(zfsvfs);
5035 ZFS_VERIFY_ZP(zp);
5036 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5037 ZFS_EXIT(zfsvfs);
5038
5039 return (error);
5040 }
5041
5042 /*ARGSUSED*/
5043 int
5044 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5045 {
5046 znode_t *zp = ITOZ(ip);
5047 zfsvfs_t *zfsvfs = ITOZSB(ip);
5048 int error;
5049 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5050 zilog_t *zilog = zfsvfs->z_log;
5051
5052 ZFS_ENTER(zfsvfs);
5053 ZFS_VERIFY_ZP(zp);
5054
5055 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5056
5057 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5058 zil_commit(zilog, 0);
5059
5060 ZFS_EXIT(zfsvfs);
5061 return (error);
5062 }
5063
5064 #ifdef HAVE_UIO_ZEROCOPY
5065 /*
5066 * Tunables; both must be a power of 2.
5067 *
5068 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
5069 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
5070 * an arcbuf for a partial block read
5071 */
5072 int zcr_blksz_min = (1 << 10); /* 1K */
5073 int zcr_blksz_max = (1 << 17); /* 128K */
5074
5075 /*ARGSUSED*/
5076 static int
5077 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
5078 {
5079 znode_t *zp = ITOZ(ip);
5080 zfsvfs_t *zfsvfs = ITOZSB(ip);
5081 int max_blksz = zfsvfs->z_max_blksz;
5082 uio_t *uio = &xuio->xu_uio;
5083 ssize_t size = uio->uio_resid;
5084 offset_t offset = uio->uio_loffset;
5085 int blksz;
5086 int fullblk, i;
5087 arc_buf_t *abuf;
5088 ssize_t maxsize;
5089 int preamble, postamble;
5090
5091 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5092 return (SET_ERROR(EINVAL));
5093
5094 ZFS_ENTER(zfsvfs);
5095 ZFS_VERIFY_ZP(zp);
5096 switch (ioflag) {
5097 case UIO_WRITE:
5098 /*
5099 * Loan out an arc_buf for write if write size is bigger than
5100 * max_blksz, and the file's block size is also max_blksz.
5101 */
5102 blksz = max_blksz;
5103 if (size < blksz || zp->z_blksz != blksz) {
5104 ZFS_EXIT(zfsvfs);
5105 return (SET_ERROR(EINVAL));
5106 }
5107 /*
5108 * Caller requests buffers for write before knowing where the
5109 * write offset might be (e.g. NFS TCP write).
5110 */
5111 if (offset == -1) {
5112 preamble = 0;
5113 } else {
5114 preamble = P2PHASE(offset, blksz);
5115 if (preamble) {
5116 preamble = blksz - preamble;
5117 size -= preamble;
5118 }
5119 }
5120
5121 postamble = P2PHASE(size, blksz);
5122 size -= postamble;
5123
5124 fullblk = size / blksz;
5125 (void) dmu_xuio_init(xuio,
5126 (preamble != 0) + fullblk + (postamble != 0));
5127
5128 /*
5129 * Have to fix iov base/len for partial buffers. They
5130 * currently represent full arc_buf's.
5131 */
5132 if (preamble) {
5133 /* data begins in the middle of the arc_buf */
5134 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5135 blksz);
5136 ASSERT(abuf);
5137 (void) dmu_xuio_add(xuio, abuf,
5138 blksz - preamble, preamble);
5139 }
5140
5141 for (i = 0; i < fullblk; i++) {
5142 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5143 blksz);
5144 ASSERT(abuf);
5145 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
5146 }
5147
5148 if (postamble) {
5149 /* data ends in the middle of the arc_buf */
5150 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5151 blksz);
5152 ASSERT(abuf);
5153 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
5154 }
5155 break;
5156 case UIO_READ:
5157 /*
5158 * Loan out an arc_buf for read if the read size is larger than
5159 * the current file block size. Block alignment is not
5160 * considered. Partial arc_buf will be loaned out for read.
5161 */
5162 blksz = zp->z_blksz;
5163 if (blksz < zcr_blksz_min)
5164 blksz = zcr_blksz_min;
5165 if (blksz > zcr_blksz_max)
5166 blksz = zcr_blksz_max;
5167 /* avoid potential complexity of dealing with it */
5168 if (blksz > max_blksz) {
5169 ZFS_EXIT(zfsvfs);
5170 return (SET_ERROR(EINVAL));
5171 }
5172
5173 maxsize = zp->z_size - uio->uio_loffset;
5174 if (size > maxsize)
5175 size = maxsize;
5176
5177 if (size < blksz) {
5178 ZFS_EXIT(zfsvfs);
5179 return (SET_ERROR(EINVAL));
5180 }
5181 break;
5182 default:
5183 ZFS_EXIT(zfsvfs);
5184 return (SET_ERROR(EINVAL));
5185 }
5186
5187 uio->uio_extflg = UIO_XUIO;
5188 XUIO_XUZC_RW(xuio) = ioflag;
5189 ZFS_EXIT(zfsvfs);
5190 return (0);
5191 }
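/*
 * Worked example of the write-side carving above, assuming
 * blksz = max_blksz = 128K (131072), offset = 5000, size = 400000:
 * preamble = blksz - P2PHASE(5000, blksz) = 126072, leaving 273928;
 * postamble = P2PHASE(273928, blksz) = 11784, leaving 262144;
 * fullblk = 262144 / 131072 = 2.  The xuio is thus initialized with
 * (preamble != 0) + fullblk + (postamble != 0) = 4 loaned arc_bufs.
 */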
5192
5193 /*ARGSUSED*/
5194 static int
5195 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
5196 {
5197 int i;
5198 arc_buf_t *abuf;
5199 int ioflag = XUIO_XUZC_RW(xuio);
5200
5201 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5202
5203 i = dmu_xuio_cnt(xuio);
5204 while (i-- > 0) {
5205 abuf = dmu_xuio_arcbuf(xuio, i);
5206 /*
5207 * if abuf == NULL, it must be a write buffer
5208 * that has been returned in zfs_write().
5209 */
5210 if (abuf)
5211 dmu_return_arcbuf(abuf);
5212 ASSERT(abuf || ioflag == UIO_WRITE);
5213 }
5214
5215 dmu_xuio_fini(xuio);
5216 return (0);
5217 }
5218 #endif /* HAVE_UIO_ZEROCOPY */
5219
5220 #if defined(_KERNEL)
5221 EXPORT_SYMBOL(zfs_open);
5222 EXPORT_SYMBOL(zfs_close);
5223 EXPORT_SYMBOL(zfs_read);
5224 EXPORT_SYMBOL(zfs_write);
5225 EXPORT_SYMBOL(zfs_access);
5226 EXPORT_SYMBOL(zfs_lookup);
5227 EXPORT_SYMBOL(zfs_create);
5228 EXPORT_SYMBOL(zfs_tmpfile);
5229 EXPORT_SYMBOL(zfs_remove);
5230 EXPORT_SYMBOL(zfs_mkdir);
5231 EXPORT_SYMBOL(zfs_rmdir);
5232 EXPORT_SYMBOL(zfs_readdir);
5233 EXPORT_SYMBOL(zfs_fsync);
5234 EXPORT_SYMBOL(zfs_getattr);
5235 EXPORT_SYMBOL(zfs_getattr_fast);
5236 EXPORT_SYMBOL(zfs_setattr);
5237 EXPORT_SYMBOL(zfs_rename);
5238 EXPORT_SYMBOL(zfs_symlink);
5239 EXPORT_SYMBOL(zfs_readlink);
5240 EXPORT_SYMBOL(zfs_link);
5241 EXPORT_SYMBOL(zfs_inactive);
5242 EXPORT_SYMBOL(zfs_space);
5243 EXPORT_SYMBOL(zfs_fid);
5244 EXPORT_SYMBOL(zfs_getsecattr);
5245 EXPORT_SYMBOL(zfs_setsecattr);
5246 EXPORT_SYMBOL(zfs_getpage);
5247 EXPORT_SYMBOL(zfs_putpage);
5248 EXPORT_SYMBOL(zfs_dirty_inode);
5249 EXPORT_SYMBOL(zfs_map);
5250
5251 /* CSTYLED */
5252 module_param(zfs_delete_blocks, ulong, 0644);
5253 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
5254 module_param(zfs_read_chunk_size, long, 0644);
5255 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
5256 #endif