/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <vm/pvn.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/mode.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>
#include <sys/zpl.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use zfs_iput_async().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign().  This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *	calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */

/*
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially as an update to invoke a
 * scanning utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
	return (0);
}

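/*
 * Called when a file is opened: honor the ZFS_APPENDONLY attribute,
 * optionally virus scan eligible files, and count synchronous (O_SYNC)
 * opens in the znode.
 */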
/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}

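/*
 * Called on file close: drop the synchronous open count taken in
 * zfs_open() and virus scan eligible regular files.
 */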
/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA).  "off" is an in/out parameter.
 */
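/*
 * Note: this path is typically reached from zpl_llseek() when user space
 * calls lseek(2) with SEEK_HOLE or SEEK_DATA; the request is translated
 * into a dmu_offset_next() query below.
 */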
static int
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* file was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(ip, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */

#if defined(_KERNEL)
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	uint64_t nbytes;
	int64_t	off;
	void *pb;

	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		nbytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, oid, start+off, nbytes, pb+off,
			    DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			SetPageUptodate(pp);
			ClearPageError(pp);
			unlock_page(pp);
			put_page(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	znode_t *zp = ITOZ(ip);
	int64_t	start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;

	start = uio->uio_loffset;
	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		bytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));
			unlock_page(pp);

			pb = kmap(pp);
			error = uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			put_page(pp);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
#endif /* _KERNEL */

unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	ssize_t		n, nbytes;
	int		error = 0;
	rl_t		*rl;
#ifdef HAVE_UIO_ZEROCOPY
	xuio_t		*xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 */
	if (zfsvfs->z_log &&
	    (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
	    RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if ((ISP2(blksz))) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */

	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			error = mappedread(ip, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	rlim64_t	limit = uio->uio_limit;
	ssize_t		start_resid = uio->uio_resid;
	ssize_t		tx_bytes;
	uint64_t	end_size;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
	zilog_t		*zilog;
	offset_t	woff;
	ssize_t		n, nbytes;
	rl_t		*rl;
	int		max_blksz = zfsvfs->z_max_blksz;
	int		error = 0;
	arc_buf_t	*abuf;
	const iovec_t	*aiov = NULL;
	xuio_t		*xuio = NULL;
	int		write_eof;
	int		count = 0;
	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];
	uint32_t	uid;
#ifdef HAVE_UIO_ZEROCOPY
	int		i_iov = 0;
	const iovec_t	*iovp = uio->uio_iov;
	ASSERTV(int	iovcnt = uio->uio_iovcnt);
#endif

	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	zilog = zfsvfs->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
#endif
		uio_prefaultpages(MIN(n, max_blksz), uio);

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		abuf = NULL;
		woff = uio->uio_loffset;
		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = SET_ERROR(EDQUOT);
			break;
		}

		if (xuio && abuf == NULL) {
#ifdef HAVE_UIO_ZEROCOPY
			ASSERT(i_iov < iovcnt);
			ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
#endif
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    /* cppcheck-suppress nullPointer */
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			update_pages(ip, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		uid = KUID_TO_SUID(ip->i_uid);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			ip->i_mode = newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}

	zfs_inode_update(zp);
	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/*
 * Drop a reference on the passed inode asynchronously. This ensures
 * that the caller will never drop the last reference on an inode in
 * the current context. Doing so while holding open a tx could result
 * in a deadlock if iput_final() re-enters the filesystem code.
 */
void
zfs_iput_async(struct inode *ip)
{
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	if (atomic_read(&ip->i_count) == 1)
		VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
		    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
	else
		iput(ip);
}

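/*
 * Completion callback for zfs_get_data(): release the dbuf and range lock,
 * drop the znode hold asynchronously, and on success record the block in
 * the ZIL.
 */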
void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_iput_async(ZTOI(zp));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

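/*
 * Debug-only fault injection: when zil_fault_io is non-zero, the next
 * indirect-write lookup in zfs_get_data() fails with EIO, exercising
 * the ZIL error-handling path.
 */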
#ifdef DEBUG
static int zil_fault_io = 0;
#endif

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_iput_async(ZTOI(zp));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zfsvfs->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,
		    RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
			    size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}

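/*
 * Check access permissions on a znode: use the ACL-aware zfs_zaccess()
 * when V_ACE_MASK is set, otherwise fall back to the traditional
 * mode-bit check in zfs_zaccess_rwx().
 */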
/*ARGSUSED*/
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	NA
 */
/* ARGSUSED */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed in name.  This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
#ifdef HAVE_DNLC
		} else if (!zdp->z_zfsvfs->z_norm &&
		    (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {

			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					iput(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					iput(tvp);
					return (SET_ERROR(ENOENT));
				} else {
					*vpp = tvp;
					return (specvp_check(vpp, cr));
				}
			}
#endif /* HAVE_DNLC */
		}
	}

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */

/* ARGSUSED */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfsvfs_t	*zfsvfs = ITOZSB(dip);
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		igrab(dip);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}
		error = dmu_tx_assign(tx,
		    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			if (dl) {
				zfs_dirent_unlock(dl);
				dl = NULL;
			}
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

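/*
 * Create an unlinked, anonymous file object (used to implement O_TMPFILE).
 * The new znode is added to the unlinked set rather than to a directory;
 * it only becomes visible if it is later linked into the namespace.
 */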
/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp = NULL, *dzp = ITOZ(dip);
	zfsvfs_t	*zfsvfs = ITOZSB(dip);
	objset_t	*os;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		goto out;
	}

	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids)) != 0)
		goto out;
	have_acl = B_TRUE;

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/* Add to unlinked set */
	zp->z_unlinked = 1;
	zfs_unlinked_add(zp, tx);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
out:

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

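/*
 * Written over SA_ZPL_XATTR for non-SA znodes in zfs_remove() below to
 * clear a stale extended attribute directory reference.
 */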
1659uint64_t null_xattr = 0;
1660
34dc7c2f 1661/*ARGSUSED*/
e5c39b95 1662int
da5e151f 1663zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
34dc7c2f 1664{
3558fd73 1665 znode_t *zp, *dzp = ITOZ(dip);
572e2857 1666 znode_t *xzp;
3558fd73 1667 struct inode *ip;
0037b49e 1668 zfsvfs_t *zfsvfs = ITOZSB(dip);
34dc7c2f 1669 zilog_t *zilog;
a966c564 1670 uint64_t acl_obj, xattr_obj;
3558fd73 1671 uint64_t xattr_obj_unlinked = 0;
572e2857 1672 uint64_t obj = 0;
dfbc8630 1673 uint64_t links;
34dc7c2f
BB
1674 zfs_dirlock_t *dl;
1675 dmu_tx_t *tx;
a966c564
K
1676 boolean_t may_delete_now, delete_now = FALSE;
1677 boolean_t unlinked, toobig = FALSE;
34dc7c2f
BB
1678 uint64_t txtype;
1679 pathname_t *realnmp = NULL;
1680 pathname_t realnm;
1681 int error;
1682 int zflg = ZEXISTS;
e8b96c60 1683 boolean_t waited = B_FALSE;
34dc7c2f 1684
32dec7bd 1685 if (name == NULL)
1686 return (SET_ERROR(EINVAL));
1687
0037b49e 1688 ZFS_ENTER(zfsvfs);
34dc7c2f 1689 ZFS_VERIFY_ZP(dzp);
0037b49e 1690 zilog = zfsvfs->z_log;
34dc7c2f
BB
1691
1692 if (flags & FIGNORECASE) {
1693 zflg |= ZCILOOK;
1694 pn_alloc(&realnm);
1695 realnmp = &realnm;
1696 }
1697
1698top:
572e2857
BB
1699 xattr_obj = 0;
1700 xzp = NULL;
34dc7c2f
BB
1701 /*
1702 * Attempt to lock directory; fail if entry doesn't exist.
1703 */
149e873a
BB
1704 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1705 NULL, realnmp))) {
34dc7c2f
BB
1706 if (realnmp)
1707 pn_free(realnmp);
0037b49e 1708 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1709 return (error);
1710 }
1711
3558fd73 1712 ip = ZTOI(zp);
34dc7c2f 1713
149e873a 1714 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
34dc7c2f
BB
1715 goto out;
1716 }
1717
1718 /*
1719 * Need to use rmdir for removing directories.
1720 */
3558fd73 1721 if (S_ISDIR(ip->i_mode)) {
2e528b49 1722 error = SET_ERROR(EPERM);
34dc7c2f
BB
1723 goto out;
1724 }
1725
3558fd73 1726#ifdef HAVE_DNLC
34dc7c2f
BB
1727 if (realnmp)
1728 dnlc_remove(dvp, realnmp->pn_buf);
1729 else
1730 dnlc_remove(dvp, name);
3558fd73 1731#endif /* HAVE_DNLC */
34dc7c2f 1732
19d55079
MA
1733 mutex_enter(&zp->z_lock);
1734 may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
1735 mutex_exit(&zp->z_lock);
1736
34dc7c2f 1737 /*
a966c564
K
1738 * We may delete the znode now, or we may put it in the unlinked set;
1739 * it depends on whether we're the last link, and on whether there are
1740 * other holds on the inode. So we dmu_tx_hold() the right things to
1741 * allow for either case.
34dc7c2f 1742 */
572e2857 1743 obj = zp->z_id;
0037b49e 1744 tx = dmu_tx_create(zfsvfs->z_os);
34dc7c2f 1745 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
428870ff
BB
1746 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1747 zfs_sa_upgrade_txholds(tx, zp);
1748 zfs_sa_upgrade_txholds(tx, dzp);
a966c564
K
1749 if (may_delete_now) {
1750 toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
1751 /* if the file is too big, only hold_free a token amount */
1752 dmu_tx_hold_free(tx, zp->z_id, 0,
1753 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1754 }
34dc7c2f
BB
1755
1756 /* are there any extended attributes? */
0037b49e 1757 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
428870ff 1758 &xattr_obj, sizeof (xattr_obj));
572e2857 1759 if (error == 0 && xattr_obj) {
0037b49e 1760 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
c99c9001 1761 ASSERT0(error);
428870ff
BB
1762 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1763 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
34dc7c2f
BB
1764 }
1765
a966c564
K
1766 mutex_enter(&zp->z_lock);
1767 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1768 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1769 mutex_exit(&zp->z_lock);
1770
34dc7c2f 1771 /* charge as an update -- would be nice not to charge at all */
0037b49e 1772 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
34dc7c2f 1773
19d55079 1774 /*
1a04bab3 1775 * Mark this transaction as typically resulting in a net free of space
19d55079 1776 */
1a04bab3 1777 dmu_tx_mark_netfree(tx);
19d55079 1778
ef7a7948 1779 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
1780 if (error) {
1781 zfs_dirent_unlock(dl);
fb5f0bc8 1782 if (error == ERESTART) {
e8b96c60 1783 waited = B_TRUE;
34dc7c2f
BB
1784 dmu_tx_wait(tx);
1785 dmu_tx_abort(tx);
ea7e86d8
BB
1786 iput(ip);
1787 if (xzp)
1788 iput(ZTOI(xzp));
34dc7c2f
BB
1789 goto top;
1790 }
1791 if (realnmp)
1792 pn_free(realnmp);
1793 dmu_tx_abort(tx);
ea7e86d8
BB
1794 iput(ip);
1795 if (xzp)
1796 iput(ZTOI(xzp));
0037b49e 1797 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1798 return (error);
1799 }
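 /*
  * Note on the assignment above: TXG_NOWAIT returns ERESTART instead of
  * blocking when the transaction group is full, so the locks are dropped,
  * dmu_tx_wait() waits for the condition to clear, and the operation is
  * retried from "top".  The retry sets "waited" and therefore passes
  * TXG_NOTHROTTLE, so the second attempt is not delayed by the write
  * throttle again.
  */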
1800
1801 /*
1802 * Remove the directory entry.
1803 */
1804 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1805
1806 if (error) {
1807 dmu_tx_commit(tx);
1808 goto out;
1809 }
1810
1811 if (unlinked) {
572e2857
BB
1812 /*
1813 * Hold z_lock so that we can make sure that the ACL obj
1814 * hasn't changed. Could have been deleted due to
1815 * zfs_sa_upgrade().
1816 */
1817 mutex_enter(&zp->z_lock);
0037b49e 1818 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
428870ff 1819 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
a966c564
K
1820 delete_now = may_delete_now && !toobig &&
1821 atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
1822 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1823 acl_obj;
1824 }
1825
1826 if (delete_now) {
1827 if (xattr_obj_unlinked) {
dfbc8630 1828 ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
a966c564
K
1829 mutex_enter(&xzp->z_lock);
1830 xzp->z_unlinked = 1;
dfbc8630
CD
1831 clear_nlink(ZTOI(xzp));
1832 links = 0;
0037b49e 1833 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
dfbc8630 1834 &links, sizeof (links), tx);
a966c564
K
1835 ASSERT3U(error, ==, 0);
1836 mutex_exit(&xzp->z_lock);
1837 zfs_unlinked_add(xzp, tx);
1838
1839 if (zp->z_is_sa)
1840 error = sa_remove(zp->z_sa_hdl,
0037b49e 1841 SA_ZPL_XATTR(zfsvfs), tx);
a966c564
K
1842 else
1843 error = sa_update(zp->z_sa_hdl,
0037b49e 1844 SA_ZPL_XATTR(zfsvfs), &null_xattr,
a966c564
K
1845 sizeof (uint64_t), tx);
1846 ASSERT0(error);
1847 }
1848 /*
1849 * Add to the unlinked set because a new reference could be
1850 * taken concurrently resulting in a deferred destruction.
1851 */
1852 zfs_unlinked_add(zp, tx);
1853 mutex_exit(&zp->z_lock);
a966c564 1854 } else if (unlinked) {
572e2857 1855 mutex_exit(&zp->z_lock);
34dc7c2f
BB
1856 zfs_unlinked_add(zp, tx);
1857 }
1858
1859 txtype = TX_REMOVE;
1860 if (flags & FIGNORECASE)
1861 txtype |= TX_CI;
572e2857 1862 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
34dc7c2f
BB
1863
1864 dmu_tx_commit(tx);
1865out:
1866 if (realnmp)
1867 pn_free(realnmp);
1868
1869 zfs_dirent_unlock(dl);
960e08fe 1870 zfs_inode_update(dzp);
ea7e86d8 1871 zfs_inode_update(zp);
34dc7c2f 1872
ea7e86d8
BB
1873 if (delete_now)
1874 iput(ip);
1875 else
a966c564 1876 zfs_iput_async(ip);
a966c564
K
1877
1878 if (xzp) {
1879 zfs_inode_update(xzp);
1880 zfs_iput_async(ZTOI(xzp));
1881 }
428870ff 1882
0037b49e 1883 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 1884 zil_commit(zilog, 0);
34dc7c2f 1885
0037b49e 1886 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1887 return (error);
1888}
1889
1890/*
3558fd73 1891 * Create a new directory and insert it into dip using the name
34dc7c2f
BB
1892 * provided. Return a pointer to the inserted directory.
1893 *
3558fd73 1894 * IN: dip - inode of directory to add subdir to.
34dc7c2f
BB
1895 * dirname - name of new directory.
1896 * vap - attributes of new directory.
1897 * cr - credentials of caller.
34dc7c2f
BB
1898 * vsecp - ACL to be set
1899 *
3558fd73 1900 * OUT: ipp - inode of created directory.
34dc7c2f
BB
1901 *
1902 * RETURN: 0 if success
1903 * error code if failure
1904 *
1905 * Timestamps:
3558fd73
BB
1906 * dip - ctime|mtime updated
1907 * ipp - ctime|mtime|atime updated
34dc7c2f
BB
1908 */
1909/*ARGSUSED*/
e5c39b95 1910int
3558fd73
BB
1911zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1912 cred_t *cr, int flags, vsecattr_t *vsecp)
34dc7c2f 1913{
3558fd73 1914 znode_t *zp, *dzp = ITOZ(dip);
0037b49e 1915 zfsvfs_t *zfsvfs = ITOZSB(dip);
34dc7c2f
BB
1916 zilog_t *zilog;
1917 zfs_dirlock_t *dl;
1918 uint64_t txtype;
1919 dmu_tx_t *tx;
1920 int error;
34dc7c2f 1921 int zf = ZNEW;
b128c09f
BB
1922 uid_t uid;
1923 gid_t gid = crgetgid(cr);
428870ff 1924 zfs_acl_ids_t acl_ids;
9babb374 1925 boolean_t fuid_dirtied;
e8b96c60 1926 boolean_t waited = B_FALSE;
34dc7c2f 1927
3558fd73 1928 ASSERT(S_ISDIR(vap->va_mode));
34dc7c2f
BB
1929
1930 /*
1931 * If we have an ephemeral id, ACL, or XVATTR then
 1932 * make sure the file system is at the proper version
1933 */
1934
3558fd73 1935 uid = crgetuid(cr);
0037b49e 1936 if (zfsvfs->z_use_fuids == B_FALSE &&
3558fd73 1937 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
2e528b49 1938 return (SET_ERROR(EINVAL));
34dc7c2f 1939
32dec7bd 1940 if (dirname == NULL)
1941 return (SET_ERROR(EINVAL));
1942
0037b49e 1943 ZFS_ENTER(zfsvfs);
34dc7c2f 1944 ZFS_VERIFY_ZP(dzp);
0037b49e 1945 zilog = zfsvfs->z_log;
34dc7c2f 1946
428870ff 1947 if (dzp->z_pflags & ZFS_XATTR) {
0037b49e 1948 ZFS_EXIT(zfsvfs);
2e528b49 1949 return (SET_ERROR(EINVAL));
34dc7c2f
BB
1950 }
1951
0037b49e 1952 if (zfsvfs->z_utf8 && u8_validate(dirname,
34dc7c2f 1953 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
0037b49e 1954 ZFS_EXIT(zfsvfs);
2e528b49 1955 return (SET_ERROR(EILSEQ));
34dc7c2f
BB
1956 }
1957 if (flags & FIGNORECASE)
1958 zf |= ZCILOOK;
1959
5484965a 1960 if (vap->va_mask & ATTR_XVATTR) {
34dc7c2f 1961 if ((error = secpolicy_xvattr((xvattr_t *)vap,
3558fd73 1962 crgetuid(cr), cr, vap->va_mode)) != 0) {
0037b49e 1963 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1964 return (error);
1965 }
428870ff 1966 }
34dc7c2f 1967
428870ff
BB
1968 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1969 vsecp, &acl_ids)) != 0) {
0037b49e 1970 ZFS_EXIT(zfsvfs);
428870ff
BB
1971 return (error);
1972 }
34dc7c2f
BB
1973 /*
1974 * First make sure the new directory doesn't exist.
428870ff
BB
1975 *
1976 * Existence is checked first to make sure we don't return
1977 * EACCES instead of EEXIST which can cause some applications
1978 * to fail.
34dc7c2f
BB
1979 */
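 /*
  * For example, an attempt to create a directory name that already exists
  * should fail with EEXIST even if the ACL check below would also have
  * denied the caller with EACCES.
  */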
1980top:
3558fd73 1981 *ipp = NULL;
34dc7c2f 1982
149e873a
BB
1983 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1984 NULL, NULL))) {
428870ff 1985 zfs_acl_ids_free(&acl_ids);
0037b49e 1986 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1987 return (error);
1988 }
1989
149e873a 1990 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
428870ff 1991 zfs_acl_ids_free(&acl_ids);
34dc7c2f 1992 zfs_dirent_unlock(dl);
0037b49e 1993 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
1994 return (error);
1995 }
1996
0037b49e 1997 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
45d1cae3 1998 zfs_acl_ids_free(&acl_ids);
9babb374 1999 zfs_dirent_unlock(dl);
0037b49e 2000 ZFS_EXIT(zfsvfs);
2e528b49 2001 return (SET_ERROR(EDQUOT));
9babb374
BB
2002 }
2003
34dc7c2f
BB
2004 /*
2005 * Add a new entry to the directory.
2006 */
0037b49e 2007 tx = dmu_tx_create(zfsvfs->z_os);
34dc7c2f
BB
2008 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2009 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
0037b49e 2010 fuid_dirtied = zfsvfs->z_fuid_dirty;
9babb374 2011 if (fuid_dirtied)
0037b49e
BB
2012 zfs_fuid_txhold(zfsvfs, tx);
2013 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
428870ff
BB
2014 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2015 acl_ids.z_aclp->z_acl_bytes);
2016 }
2017
2018 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2019 ZFS_SA_BASE_ATTR_SIZE);
2020
ef7a7948 2021 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
2022 if (error) {
2023 zfs_dirent_unlock(dl);
fb5f0bc8 2024 if (error == ERESTART) {
e8b96c60 2025 waited = B_TRUE;
34dc7c2f
BB
2026 dmu_tx_wait(tx);
2027 dmu_tx_abort(tx);
2028 goto top;
2029 }
428870ff 2030 zfs_acl_ids_free(&acl_ids);
34dc7c2f 2031 dmu_tx_abort(tx);
0037b49e 2032 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2033 return (error);
2034 }
2035
2036 /*
2037 * Create new node.
2038 */
428870ff 2039 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
34dc7c2f 2040
9a2e90c9
TH
2041 if (fuid_dirtied)
2042 zfs_fuid_sync(zfsvfs, tx);
2043
34dc7c2f
BB
2044 /*
2045 * Now put new name in parent dir.
2046 */
9a2e90c9 2047 (void) zfs_link_create(dl, zp, tx, ZNEW);
34dc7c2f 2048
3558fd73 2049 *ipp = ZTOI(zp);
34dc7c2f
BB
2050
2051 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2052 if (flags & FIGNORECASE)
2053 txtype |= TX_CI;
9babb374
BB
2054 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2055 acl_ids.z_fuidp, vap);
34dc7c2f 2056
9babb374 2057 zfs_acl_ids_free(&acl_ids);
428870ff 2058
34dc7c2f
BB
2059 dmu_tx_commit(tx);
2060
2061 zfs_dirent_unlock(dl);
2062
0037b49e 2063 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 2064 zil_commit(zilog, 0);
428870ff 2065
9a2e90c9
TH
2066 zfs_inode_update(dzp);
2067 zfs_inode_update(zp);
0037b49e 2068 ZFS_EXIT(zfsvfs);
9a2e90c9 2069 return (0);
34dc7c2f
BB
2070}
2071
2072/*
2073 * Remove a directory subdir entry. If the current working
2074 * directory is the same as the subdir to be removed, the
2075 * remove will fail.
2076 *
3558fd73 2077 * IN: dip - inode of directory to remove from.
34dc7c2f 2078 * name - name of directory to be removed.
3558fd73 2079 * cwd - inode of current working directory.
34dc7c2f 2080 * cr - credentials of caller.
34dc7c2f
BB
2081 * flags - case flags
2082 *
d3cc8b15 2083 * RETURN: 0 on success, error code on failure.
34dc7c2f
BB
2084 *
2085 * Timestamps:
3558fd73 2086 * dip - ctime|mtime updated
34dc7c2f
BB
2087 */
2088/*ARGSUSED*/
e5c39b95 2089int
3558fd73
BB
2090zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2091 int flags)
34dc7c2f 2092{
3558fd73 2093 znode_t *dzp = ITOZ(dip);
34dc7c2f 2094 znode_t *zp;
3558fd73 2095 struct inode *ip;
0037b49e 2096 zfsvfs_t *zfsvfs = ITOZSB(dip);
34dc7c2f
BB
2097 zilog_t *zilog;
2098 zfs_dirlock_t *dl;
2099 dmu_tx_t *tx;
2100 int error;
2101 int zflg = ZEXISTS;
e8b96c60 2102 boolean_t waited = B_FALSE;
34dc7c2f 2103
32dec7bd 2104 if (name == NULL)
2105 return (SET_ERROR(EINVAL));
2106
0037b49e 2107 ZFS_ENTER(zfsvfs);
34dc7c2f 2108 ZFS_VERIFY_ZP(dzp);
0037b49e 2109 zilog = zfsvfs->z_log;
34dc7c2f
BB
2110
2111 if (flags & FIGNORECASE)
2112 zflg |= ZCILOOK;
2113top:
2114 zp = NULL;
2115
2116 /*
2117 * Attempt to lock directory; fail if entry doesn't exist.
2118 */
149e873a
BB
2119 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2120 NULL, NULL))) {
0037b49e 2121 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2122 return (error);
2123 }
2124
3558fd73 2125 ip = ZTOI(zp);
34dc7c2f 2126
149e873a 2127 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
34dc7c2f
BB
2128 goto out;
2129 }
2130
3558fd73 2131 if (!S_ISDIR(ip->i_mode)) {
2e528b49 2132 error = SET_ERROR(ENOTDIR);
34dc7c2f
BB
2133 goto out;
2134 }
2135
3558fd73 2136 if (ip == cwd) {
2e528b49 2137 error = SET_ERROR(EINVAL);
34dc7c2f
BB
2138 goto out;
2139 }
2140
34dc7c2f 2141 /*
4e33ba4c 2142 * Grab a lock on the directory to make sure that no one is
34dc7c2f
BB
 2143 * trying to add (or look up) entries while we are removing it.
2144 */
2145 rw_enter(&zp->z_name_lock, RW_WRITER);
2146
2147 /*
2148 * Grab a lock on the parent pointer to make sure we play well
2149 * with the treewalk and directory rename code.
2150 */
2151 rw_enter(&zp->z_parent_lock, RW_WRITER);
2152
0037b49e 2153 tx = dmu_tx_create(zfsvfs->z_os);
34dc7c2f 2154 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
428870ff 2155 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
0037b49e 2156 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
428870ff
BB
2157 zfs_sa_upgrade_txholds(tx, zp);
2158 zfs_sa_upgrade_txholds(tx, dzp);
db707ad0 2159 dmu_tx_mark_netfree(tx);
ef7a7948 2160 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
2161 if (error) {
2162 rw_exit(&zp->z_parent_lock);
2163 rw_exit(&zp->z_name_lock);
2164 zfs_dirent_unlock(dl);
fb5f0bc8 2165 if (error == ERESTART) {
e8b96c60 2166 waited = B_TRUE;
34dc7c2f
BB
2167 dmu_tx_wait(tx);
2168 dmu_tx_abort(tx);
ea7e86d8 2169 iput(ip);
34dc7c2f
BB
2170 goto top;
2171 }
2172 dmu_tx_abort(tx);
ea7e86d8 2173 iput(ip);
0037b49e 2174 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2175 return (error);
2176 }
2177
2178 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2179
2180 if (error == 0) {
2181 uint64_t txtype = TX_RMDIR;
2182 if (flags & FIGNORECASE)
2183 txtype |= TX_CI;
572e2857 2184 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
34dc7c2f
BB
2185 }
2186
2187 dmu_tx_commit(tx);
2188
2189 rw_exit(&zp->z_parent_lock);
2190 rw_exit(&zp->z_name_lock);
2191out:
2192 zfs_dirent_unlock(dl);
2193
59157910
BB
2194 zfs_inode_update(dzp);
2195 zfs_inode_update(zp);
3558fd73 2196 iput(ip);
34dc7c2f 2197
0037b49e 2198 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 2199 zil_commit(zilog, 0);
428870ff 2200
0037b49e 2201 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2202 return (error);
2203}
2204
2205/*
 2206 * Read as many directory entries as the provided directory context
3558fd73 2207 * will accept, starting from the given directory cursor position.
34dc7c2f 2208 *
3558fd73
BB
2209 * IN: ip - inode of directory to read.
 2210 * ctx - directory entry context used to emit entries.
34dc7c2f 2211 *
3558fd73 2212 * OUT: ctx - cursor position (ctx->pos) advanced past the emitted entries.
34dc7c2f
BB
2213 *
2214 * RETURN: 0 if success
2215 * error code if failure
2216 *
2217 * Timestamps:
3558fd73 2218 * ip - atime updated
34dc7c2f
BB
2219 *
 2220 * Note that the low 4 bits of the cookie returned by zap are always zero.
2221 * This allows us to use the low range for "special" directory entries:
2222 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2223 * we use the offset 2 for the '.zfs' directory.
2224 */
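/*
 * For example, when listing the root of a filesystem with a visible '.zfs'
 * directory, ctx->pos moves through 0 ('.'), 1 ('..'), 2 ('.zfs'), and then
 * takes serialized ZAP cursor values (low 4 bits zero) for ordinary entries.
 */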
2225/* ARGSUSED */
3558fd73 2226int
0ee12919 2227zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
34dc7c2f 2228{
3558fd73 2229 znode_t *zp = ITOZ(ip);
0037b49e 2230 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f 2231 objset_t *os;
34dc7c2f
BB
2232 zap_cursor_t zc;
2233 zap_attribute_t zap;
34dc7c2f
BB
2234 int error;
2235 uint8_t prefetch;
c12e3a59 2236 uint8_t type;
3558fd73
BB
2237 int done = 0;
2238 uint64_t parent;
c12e3a59 2239 uint64_t offset; /* must be unsigned; checks for < 1 */
34dc7c2f 2240
0037b49e 2241 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
2242 ZFS_VERIFY_ZP(zp);
2243
0037b49e 2244 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3558fd73
BB
2245 &parent, sizeof (parent))) != 0)
2246 goto out;
34dc7c2f
BB
2247
2248 /*
2249 * Quit if directory has been removed (posix)
2250 */
3558fd73
BB
2251 if (zp->z_unlinked)
2252 goto out;
2253
c12e3a59 2254 error = 0;
0037b49e 2255 os = zfsvfs->z_os;
c12e3a59 2256 offset = ctx->pos;
34dc7c2f
BB
2257 prefetch = zp->z_zn_prefetch;
2258
2259 /*
2260 * Initialize the iterator cursor.
2261 */
c12e3a59 2262 if (offset <= 3) {
34dc7c2f
BB
2263 /*
2264 * Start iteration from the beginning of the directory.
2265 */
2266 zap_cursor_init(&zc, os, zp->z_id);
2267 } else {
2268 /*
2269 * The offset is a serialized cursor.
2270 */
c12e3a59 2271 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
34dc7c2f
BB
2272 }
2273
34dc7c2f
BB
2274 /*
2275 * Transform to file-system independent format
2276 */
3558fd73
BB
2277 while (!done) {
2278 uint64_t objnum;
34dc7c2f
BB
2279 /*
2280 * Special case `.', `..', and `.zfs'.
2281 */
c12e3a59 2282 if (offset == 0) {
34dc7c2f
BB
2283 (void) strcpy(zap.za_name, ".");
2284 zap.za_normalization_conflict = 0;
2285 objnum = zp->z_id;
c12e3a59
RY
2286 type = DT_DIR;
2287 } else if (offset == 1) {
34dc7c2f
BB
2288 (void) strcpy(zap.za_name, "..");
2289 zap.za_normalization_conflict = 0;
428870ff 2290 objnum = parent;
c12e3a59
RY
2291 type = DT_DIR;
2292 } else if (offset == 2 && zfs_show_ctldir(zp)) {
34dc7c2f
BB
2293 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2294 zap.za_normalization_conflict = 0;
2295 objnum = ZFSCTL_INO_ROOT;
c12e3a59 2296 type = DT_DIR;
34dc7c2f
BB
2297 } else {
2298 /*
2299 * Grab next entry.
2300 */
3558fd73
BB
2301 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2302 if (error == ENOENT)
34dc7c2f
BB
2303 break;
2304 else
2305 goto update;
2306 }
2307
0c5dde49
BB
2308 /*
2309 * Allow multiple entries provided the first entry is
2310 * the object id. Non-zpl consumers may safely make
2311 * use of the additional space.
2312 *
2313 * XXX: This should be a feature flag for compatibility
2314 */
34dc7c2f 2315 if (zap.za_integer_length != 8 ||
0c5dde49 2316 zap.za_num_integers == 0) {
34dc7c2f 2317 cmn_err(CE_WARN, "zap_readdir: bad directory "
0c5dde49
BB
2318 "entry, obj = %lld, offset = %lld, "
2319 "length = %d, num = %lld\n",
34dc7c2f 2320 (u_longlong_t)zp->z_id,
c12e3a59 2321 (u_longlong_t)offset,
0c5dde49
BB
2322 zap.za_integer_length,
2323 (u_longlong_t)zap.za_num_integers);
2e528b49 2324 error = SET_ERROR(ENXIO);
34dc7c2f
BB
2325 goto update;
2326 }
2327
2328 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
c12e3a59 2329 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
34dc7c2f 2330 }
0f37d0c8 2331
0ee12919 2332 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
c12e3a59 2333 objnum, type);
0f37d0c8 2334 if (done)
34dc7c2f 2335 break;
34dc7c2f
BB
2336
2337 /* Prefetch znode */
3558fd73 2338 if (prefetch) {
fcff0f35
PD
2339 dmu_prefetch(os, objnum, 0, 0, 0,
2340 ZIO_PRIORITY_SYNC_READ);
3558fd73 2341 }
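 /*
  * The prefetch above is only a hint: it starts an asynchronous read of
  * the entry's object so that a subsequent stat() of the names just
  * returned is more likely to find them already cached.
  */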
34dc7c2f 2342
c12e3a59
RY
2343 /*
2344 * Move to the next entry, fill in the previous offset.
2345 */
2346 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
34dc7c2f 2347 zap_cursor_advance(&zc);
c12e3a59 2348 offset = zap_cursor_serialize(&zc);
34dc7c2f 2349 } else {
c12e3a59 2350 offset += 1;
34dc7c2f 2351 }
c12e3a59 2352 ctx->pos = offset;
34dc7c2f
BB
2353 }
2354 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2355
34dc7c2f
BB
2356update:
2357 zap_cursor_fini(&zc);
34dc7c2f
BB
2358 if (error == ENOENT)
2359 error = 0;
3558fd73 2360out:
0037b49e 2361 ZFS_EXIT(zfsvfs);
34dc7c2f 2362
34dc7c2f
BB
2363 return (error);
2364}
2365
d5446cfc
BB
2366ulong_t zfs_fsync_sync_cnt = 4;
2367
e5c39b95 2368int
3558fd73 2369zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
34dc7c2f 2370{
3558fd73 2371 znode_t *zp = ITOZ(ip);
0037b49e 2372 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f 2373
d5446cfc
BB
2374 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2375
0037b49e
BB
2376 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2377 ZFS_ENTER(zfsvfs);
428870ff 2378 ZFS_VERIFY_ZP(zp);
0037b49e
BB
2379 zil_commit(zfsvfs->z_log, zp->z_id);
2380 ZFS_EXIT(zfsvfs);
428870ff 2381 }
07012da6
CC
2382 tsd_set(zfs_fsyncer_key, NULL);
2383
34dc7c2f
BB
2384 return (0);
2385}
2386
2387
2388/*
2389 * Get the requested file attributes and place them in the provided
2390 * vattr structure.
2391 *
3558fd73 2392 * IN: ip - inode of file.
5484965a
BB
2393 * vap - va_mask identifies requested attributes.
2394 * If ATTR_XVATTR set, then optional attrs are requested
34dc7c2f
BB
2395 * flags - ATTR_NOACLCHECK (CIFS server context)
2396 * cr - credentials of caller.
34dc7c2f 2397 *
5484965a
BB
2398 * OUT: vap - attribute values.
2399 *
2400 * RETURN: 0 (always succeeds)
34dc7c2f
BB
2401 */
2402/* ARGSUSED */
e5c39b95 2403int
5484965a 2404zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
34dc7c2f 2405{
3558fd73 2406 znode_t *zp = ITOZ(ip);
0037b49e 2407 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
2408 int error = 0;
2409 uint64_t links;
0df9673f 2410 uint64_t atime[2], mtime[2], ctime[2];
5484965a
BB
2411 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2412 xoptattr_t *xoap = NULL;
34dc7c2f 2413 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
0df9673f 2414 sa_bulk_attr_t bulk[3];
428870ff 2415 int count = 0;
34dc7c2f 2416
0037b49e 2417 ZFS_ENTER(zfsvfs);
34dc7c2f 2418 ZFS_VERIFY_ZP(zp);
428870ff 2419
5484965a 2420 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
572e2857 2421
0037b49e
BB
2422 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2423 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2424 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
428870ff
BB
2425
2426 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
0037b49e 2427 ZFS_EXIT(zfsvfs);
428870ff
BB
2428 return (error);
2429 }
34dc7c2f 2430
34dc7c2f
BB
2431 /*
2432 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2433 * Also, if we are the owner don't bother, since owner should
2434 * always be allowed to read basic attributes of file.
2435 */
572e2857 2436 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
5484965a 2437 (vap->va_uid != crgetuid(cr))) {
149e873a
BB
2438 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2439 skipaclchk, cr))) {
0037b49e 2440 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2441 return (error);
2442 }
2443 }
2444
2445 /*
2446 * Return all attributes. It's cheaper to provide the answer
2447 * than to determine whether we were asked the question.
2448 */
2449
9babb374 2450 mutex_enter(&zp->z_lock);
5484965a
BB
2451 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2452 vap->va_mode = zp->z_mode;
53cf50e0 2453 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
5484965a 2454 vap->va_nodeid = zp->z_id;
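 /* The extra link counted on the root directory accounts for the virtual '.zfs' entry. */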
0037b49e 2455 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
dfbc8630 2456 links = ZTOI(zp)->i_nlink + 1;
34dc7c2f 2457 else
dfbc8630 2458 links = ZTOI(zp)->i_nlink;
5484965a
BB
2459 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2460 vap->va_size = i_size_read(ip);
2461 vap->va_rdev = ip->i_rdev;
2462 vap->va_seq = ip->i_generation;
2463
2464 /*
2465 * Add in any requested optional attributes and the create time.
2466 * Also set the corresponding bits in the returned attribute bitmap.
2467 */
0037b49e 2468 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
5484965a
BB
2469 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2470 xoap->xoa_archive =
2471 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2472 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2473 }
2474
2475 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2476 xoap->xoa_readonly =
2477 ((zp->z_pflags & ZFS_READONLY) != 0);
2478 XVA_SET_RTN(xvap, XAT_READONLY);
2479 }
2480
2481 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2482 xoap->xoa_system =
2483 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2484 XVA_SET_RTN(xvap, XAT_SYSTEM);
2485 }
2486
2487 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2488 xoap->xoa_hidden =
2489 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2490 XVA_SET_RTN(xvap, XAT_HIDDEN);
2491 }
2492
2493 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2494 xoap->xoa_nounlink =
2495 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2496 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2497 }
2498
2499 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2500 xoap->xoa_immutable =
2501 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2502 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2503 }
2504
2505 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2506 xoap->xoa_appendonly =
2507 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2508 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2509 }
2510
2511 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2512 xoap->xoa_nodump =
2513 ((zp->z_pflags & ZFS_NODUMP) != 0);
2514 XVA_SET_RTN(xvap, XAT_NODUMP);
2515 }
2516
2517 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2518 xoap->xoa_opaque =
2519 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2520 XVA_SET_RTN(xvap, XAT_OPAQUE);
2521 }
2522
2523 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2524 xoap->xoa_av_quarantined =
2525 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2526 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2527 }
2528
2529 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2530 xoap->xoa_av_modified =
2531 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2532 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2533 }
2534
2535 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2536 S_ISREG(ip->i_mode)) {
2537 zfs_sa_get_scanstamp(zp, xvap);
2538 }
34dc7c2f 2539
5484965a
BB
2540 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2541 uint64_t times[2];
2542
0037b49e 2543 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
5484965a
BB
2544 times, sizeof (times));
2545 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2546 XVA_SET_RTN(xvap, XAT_CREATETIME);
2547 }
2548
2549 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2550 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2551 XVA_SET_RTN(xvap, XAT_REPARSE);
2552 }
2553 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
278f2236 2554 xoap->xoa_generation = ip->i_generation;
5484965a
BB
2555 XVA_SET_RTN(xvap, XAT_GEN);
2556 }
2557
2558 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2559 xoap->xoa_offline =
2560 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2561 XVA_SET_RTN(xvap, XAT_OFFLINE);
2562 }
2563
2564 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2565 xoap->xoa_sparse =
2566 ((zp->z_pflags & ZFS_SPARSE) != 0);
2567 XVA_SET_RTN(xvap, XAT_SPARSE);
2568 }
2569 }
2570
0df9673f 2571 ZFS_TIME_DECODE(&vap->va_atime, atime);
5484965a
BB
2572 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2573 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
34dc7c2f
BB
2574
2575 mutex_exit(&zp->z_lock);
2576
5484965a 2577 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
34dc7c2f
BB
2578
2579 if (zp->z_blksz == 0) {
2580 /*
2581 * Block size hasn't been set; suggest maximal I/O transfers.
2582 */
0037b49e 2583 vap->va_blksize = zfsvfs->z_max_blksz;
34dc7c2f
BB
2584 }
2585
0037b49e 2586 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
2587 return (0);
2588}
2589
057e8eee
BB
2590/*
2591 * Get the basic file attributes and place them in the provided kstat
2592 * structure. The inode is assumed to be the authoritative source
2593 * for most of the attributes. However, the znode currently has the
2594 * authoritative atime, blksize, and block count.
2595 *
2596 * IN: ip - inode of file.
2597 *
2598 * OUT: sp - kstat values.
2599 *
2600 * RETURN: 0 (always succeeds)
2601 */
2602/* ARGSUSED */
2603int
2604zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2605{
2606 znode_t *zp = ITOZ(ip);
0037b49e 2607 zfsvfs_t *zfsvfs = ITOZSB(ip);
b585bc4a
BB
2608 uint32_t blksize;
2609 u_longlong_t nblocks;
057e8eee 2610
0037b49e 2611 ZFS_ENTER(zfsvfs);
a7b125e9
GB
2612 ZFS_VERIFY_ZP(zp);
2613
057e8eee
BB
2614 mutex_enter(&zp->z_lock);
2615
2616 generic_fillattr(ip, sp);
057e8eee 2617
b585bc4a
BB
2618 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2619 sp->blksize = blksize;
2620 sp->blocks = nblocks;
2621
057e8eee
BB
2622 if (unlikely(zp->z_blksz == 0)) {
2623 /*
2624 * Block size hasn't been set; suggest maximal I/O transfers.
2625 */
0037b49e 2626 sp->blksize = zfsvfs->z_max_blksz;
057e8eee
BB
2627 }
2628
2629 mutex_exit(&zp->z_lock);
2630
aa9b2708
AV
2631 /*
2632 * Required to prevent NFS client from detecting different inode
2633 * numbers of snapshot root dentry before and after snapshot mount.
2634 */
0037b49e 2635 if (zfsvfs->z_issnap) {
aa9b2708
AV
2636 if (ip->i_sb->s_root->d_inode == ip)
2637 sp->ino = ZFSCTL_INO_SNAPDIRS -
0037b49e 2638 dmu_objset_id(zfsvfs->z_os);
aa9b2708
AV
2639 }
2640
0037b49e 2641 ZFS_EXIT(zfsvfs);
a7b125e9 2642
057e8eee
BB
2643 return (0);
2644}
057e8eee 2645
34dc7c2f
BB
2646/*
2647 * Set the file attributes to the values contained in the
2648 * vattr structure.
2649 *
3558fd73 2650 * IN: ip - inode of file to be modified.
34dc7c2f 2651 * vap - new attribute values.
5484965a 2652 * If ATTR_XVATTR set, then optional attrs are being set
34dc7c2f
BB
2653 * flags - ATTR_UTIME set if non-default time values provided.
2654 * - ATTR_NOACLCHECK (CIFS context only).
2655 * cr - credentials of caller.
34dc7c2f
BB
2656 *
2657 * RETURN: 0 if success
2658 * error code if failure
2659 *
2660 * Timestamps:
3558fd73 2661 * ip - ctime updated, mtime updated if size changed.
34dc7c2f
BB
2662 */
2663/* ARGSUSED */
e5c39b95 2664int
5484965a 2665zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
34dc7c2f 2666{
3558fd73 2667 znode_t *zp = ITOZ(ip);
0037b49e 2668 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
2669 zilog_t *zilog;
2670 dmu_tx_t *tx;
2671 vattr_t oldva;
f4ea75d4 2672 xvattr_t *tmpxvattr;
5484965a 2673 uint_t mask = vap->va_mask;
a117a6d6 2674 uint_t saved_mask = 0;
34dc7c2f
BB
2675 int trim_mask = 0;
2676 uint64_t new_mode;
64aefee1 2677 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
572e2857 2678 uint64_t xattr_obj;
0df9673f 2679 uint64_t mtime[2], ctime[2], atime[2];
34dc7c2f
BB
2680 znode_t *attrzp;
2681 int need_policy = FALSE;
428870ff 2682 int err, err2;
34dc7c2f 2683 zfs_fuid_info_t *fuidp = NULL;
5484965a
BB
2684 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2685 xoptattr_t *xoap;
2686 zfs_acl_t *aclp;
34dc7c2f 2687 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
428870ff 2688 boolean_t fuid_dirtied = B_FALSE;
17c37660 2689 sa_bulk_attr_t *bulk, *xattr_bulk;
428870ff 2690 int count = 0, xattr_count = 0;
34dc7c2f
BB
2691
2692 if (mask == 0)
2693 return (0);
2694
0037b49e 2695 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
2696 ZFS_VERIFY_ZP(zp);
2697
0037b49e 2698 zilog = zfsvfs->z_log;
34dc7c2f
BB
2699
2700 /*
2701 * Make sure that if we have ephemeral uid/gid or xvattr specified
 2702 * that the file system is at the proper version level
2703 */
5484965a 2704
0037b49e 2705 if (zfsvfs->z_use_fuids == B_FALSE &&
5484965a
BB
2706 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2707 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2708 (mask & ATTR_XVATTR))) {
0037b49e 2709 ZFS_EXIT(zfsvfs);
2e528b49 2710 return (SET_ERROR(EINVAL));
34dc7c2f
BB
2711 }
2712
3558fd73 2713 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
0037b49e 2714 ZFS_EXIT(zfsvfs);
2e528b49 2715 return (SET_ERROR(EISDIR));
34dc7c2f
BB
2716 }
2717
3558fd73 2718 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
0037b49e 2719 ZFS_EXIT(zfsvfs);
2e528b49 2720 return (SET_ERROR(EINVAL));
34dc7c2f
BB
2721 }
2722
5484965a
BB
2723 /*
2724 * If this is an xvattr_t, then get a pointer to the structure of
2725 * optional attributes. If this is NULL, then we have a vattr_t.
2726 */
2727 xoap = xva_getxoptattr(xvap);
2728
d1d7e268 2729 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
f4ea75d4 2730 xva_init(tmpxvattr);
5484965a 2731
d1d7e268
MK
2732 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2733 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
17c37660 2734
5484965a
BB
2735 /*
2736 * Immutable files can only alter immutable bit and atime
2737 */
2738 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2739 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2740 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
f4ea75d4
BB
2741 err = EPERM;
2742 goto out3;
5484965a
BB
2743 }
2744
3558fd73 2745 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
f4ea75d4
BB
2746 err = EPERM;
2747 goto out3;
34dc7c2f
BB
2748 }
2749
5484965a
BB
2750 /*
 2751 * Verify that the timestamps don't overflow 32 bits.
 2752 * ZFS can handle large timestamps, but 32-bit syscalls can't
2753 * handle times greater than 2039. This check should be removed
2754 * once large timestamps are fully supported.
2755 */
2756 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
d1d7e268
MK
2757 if (((mask & ATTR_ATIME) &&
2758 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2759 ((mask & ATTR_MTIME) &&
2760 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
f4ea75d4
BB
2761 err = EOVERFLOW;
2762 goto out3;
5484965a
BB
2763 }
2764 }
2765
34dc7c2f
BB
2766top:
2767 attrzp = NULL;
572e2857 2768 aclp = NULL;
34dc7c2f 2769
45d1cae3 2770 /* Can this be moved to before the top label? */
0037b49e 2771 if (zfs_is_readonly(zfsvfs)) {
f4ea75d4
BB
2772 err = EROFS;
2773 goto out3;
34dc7c2f
BB
2774 }
2775
2776 /*
2777 * First validate permissions
2778 */
2779
3558fd73 2780 if (mask & ATTR_SIZE) {
34dc7c2f 2781 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
f4ea75d4
BB
2782 if (err)
2783 goto out3;
2784
34dc7c2f
BB
2785 /*
2786 * XXX - Note, we are not providing any open
2787 * mode flags here (like FNDELAY), so we may
2788 * block if there are locks present... this
2789 * should be addressed in openat().
2790 */
b128c09f 2791 /* XXX - would it be OK to generate a log record here? */
5484965a 2792 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
f4ea75d4
BB
2793 if (err)
2794 goto out3;
428870ff 2795 }
34dc7c2f 2796
5484965a
BB
2797 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2798 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2799 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2800 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2801 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2802 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2803 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2804 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2805 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2806 skipaclchk, cr);
2807 }
2808
3558fd73
BB
2809 if (mask & (ATTR_UID|ATTR_GID)) {
2810 int idmask = (mask & (ATTR_UID|ATTR_GID));
34dc7c2f
BB
2811 int take_owner;
2812 int take_group;
2813
2814 /*
2815 * NOTE: even if a new mode is being set,
2816 * we may clear S_ISUID/S_ISGID bits.
2817 */
2818
3558fd73 2819 if (!(mask & ATTR_MODE))
5484965a 2820 vap->va_mode = zp->z_mode;
34dc7c2f
BB
2821
2822 /*
2823 * Take ownership or chgrp to group we are a member of
2824 */
2825
5484965a 2826 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
3558fd73 2827 take_group = (mask & ATTR_GID) &&
0037b49e 2828 zfs_groupmember(zfsvfs, vap->va_gid, cr);
34dc7c2f
BB
2829
2830 /*
5484965a 2831 * If both ATTR_UID and ATTR_GID are set then take_owner and
34dc7c2f
BB
2832 * take_group must both be set in order to allow taking
2833 * ownership.
2834 *
2835 * Otherwise, send the check through secpolicy_vnode_setattr()
2836 *
2837 */
2838
3558fd73
BB
2839 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2840 take_owner && take_group) ||
2841 ((idmask == ATTR_UID) && take_owner) ||
2842 ((idmask == ATTR_GID) && take_group)) {
34dc7c2f
BB
2843 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2844 skipaclchk, cr) == 0) {
2845 /*
2846 * Remove setuid/setgid for non-privileged users
2847 */
5484965a 2848 (void) secpolicy_setid_clear(vap, cr);
3558fd73 2849 trim_mask = (mask & (ATTR_UID|ATTR_GID));
34dc7c2f
BB
2850 } else {
2851 need_policy = TRUE;
2852 }
2853 } else {
2854 need_policy = TRUE;
2855 }
2856 }
2857
2858 mutex_enter(&zp->z_lock);
428870ff 2859 oldva.va_mode = zp->z_mode;
572e2857 2860 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
5484965a
BB
2861 if (mask & ATTR_XVATTR) {
2862 /*
2863 * Update xvattr mask to include only those attributes
2864 * that are actually changing.
2865 *
2866 * the bits will be restored prior to actually setting
2867 * the attributes so the caller thinks they were set.
2868 */
2869 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2870 if (xoap->xoa_appendonly !=
2871 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2872 need_policy = TRUE;
2873 } else {
2874 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
f4ea75d4 2875 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
5484965a
BB
2876 }
2877 }
2878
2879 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2880 if (xoap->xoa_nounlink !=
2881 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2882 need_policy = TRUE;
2883 } else {
2884 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
f4ea75d4 2885 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
5484965a
BB
2886 }
2887 }
2888
2889 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2890 if (xoap->xoa_immutable !=
2891 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2892 need_policy = TRUE;
2893 } else {
2894 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
f4ea75d4 2895 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
5484965a
BB
2896 }
2897 }
2898
2899 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2900 if (xoap->xoa_nodump !=
2901 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2902 need_policy = TRUE;
2903 } else {
2904 XVA_CLR_REQ(xvap, XAT_NODUMP);
f4ea75d4 2905 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
5484965a
BB
2906 }
2907 }
2908
2909 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2910 if (xoap->xoa_av_modified !=
2911 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2912 need_policy = TRUE;
2913 } else {
2914 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
f4ea75d4 2915 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
5484965a
BB
2916 }
2917 }
2918
2919 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2920 if ((!S_ISREG(ip->i_mode) &&
2921 xoap->xoa_av_quarantined) ||
2922 xoap->xoa_av_quarantined !=
2923 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2924 need_policy = TRUE;
2925 } else {
2926 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
f4ea75d4 2927 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
5484965a
BB
2928 }
2929 }
2930
2931 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2932 mutex_exit(&zp->z_lock);
f4ea75d4
BB
2933 err = EPERM;
2934 goto out3;
5484965a
BB
2935 }
2936
2937 if (need_policy == FALSE &&
2938 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2939 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2940 need_policy = TRUE;
2941 }
2942 }
34dc7c2f
BB
2943
2944 mutex_exit(&zp->z_lock);
2945
3558fd73 2946 if (mask & ATTR_MODE) {
34dc7c2f 2947 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
5484965a 2948 err = secpolicy_setid_setsticky_clear(ip, vap,
34dc7c2f 2949 &oldva, cr);
f4ea75d4
BB
2950 if (err)
2951 goto out3;
2952
3558fd73 2953 trim_mask |= ATTR_MODE;
34dc7c2f
BB
2954 } else {
2955 need_policy = TRUE;
2956 }
2957 }
2958
2959 if (need_policy) {
2960 /*
 2961 * If trim_mask is set then take ownership
 2962 * has been granted or write_acl is present and the user
 2963 * has the ability to modify the mode. In that case remove
 2964 * UID|GID and/or MODE from the mask so that
2965 * secpolicy_vnode_setattr() doesn't revoke it.
2966 */
2967
2968 if (trim_mask) {
5484965a
BB
2969 saved_mask = vap->va_mask;
2970 vap->va_mask &= ~trim_mask;
34dc7c2f 2971 }
5484965a 2972 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
34dc7c2f 2973 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
f4ea75d4
BB
2974 if (err)
2975 goto out3;
34dc7c2f
BB
2976
2977 if (trim_mask)
5484965a 2978 vap->va_mask |= saved_mask;
34dc7c2f
BB
2979 }
2980
2981 /*
2982 * secpolicy_vnode_setattr, or take ownership may have
2983 * changed va_mask
2984 */
5484965a 2985 mask = vap->va_mask;
34dc7c2f 2986
3558fd73 2987 if ((mask & (ATTR_UID | ATTR_GID))) {
0037b49e 2988 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
572e2857 2989 &xattr_obj, sizeof (xattr_obj));
428870ff 2990
572e2857 2991 if (err == 0 && xattr_obj) {
3558fd73 2992 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
428870ff
BB
2993 if (err)
2994 goto out2;
2995 }
3558fd73 2996 if (mask & ATTR_UID) {
0037b49e 2997 new_kuid = zfs_fuid_create(zfsvfs,
5484965a 2998 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
64aefee1 2999 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
0037b49e 3000 zfs_fuid_overquota(zfsvfs, B_FALSE, new_kuid)) {
572e2857 3001 if (attrzp)
3558fd73 3002 iput(ZTOI(attrzp));
428870ff
BB
3003 err = EDQUOT;
3004 goto out2;
3005 }
3006 }
3007
3558fd73 3008 if (mask & ATTR_GID) {
0037b49e
BB
3009 new_kgid = zfs_fuid_create(zfsvfs,
3010 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
64aefee1 3011 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
0037b49e 3012 zfs_fuid_overquota(zfsvfs, B_TRUE, new_kgid)) {
572e2857 3013 if (attrzp)
3558fd73 3014 iput(ZTOI(attrzp));
428870ff
BB
3015 err = EDQUOT;
3016 goto out2;
3017 }
3018 }
3019 }
0037b49e 3020 tx = dmu_tx_create(zfsvfs->z_os);
34dc7c2f 3021
3558fd73 3022 if (mask & ATTR_MODE) {
428870ff 3023 uint64_t pmode = zp->z_mode;
572e2857 3024 uint64_t acl_obj;
5484965a 3025 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
34dc7c2f 3026
572e2857 3027 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
428870ff 3028
572e2857
BB
3029 mutex_enter(&zp->z_lock);
3030 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
428870ff
BB
3031 /*
3032 * Are we upgrading ACL from old V0 format
3033 * to V1 format?
3034 */
0037b49e 3035 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
572e2857 3036 zfs_znode_acl_version(zp) ==
34dc7c2f 3037 ZFS_ACL_VERSION_INITIAL) {
572e2857 3038 dmu_tx_hold_free(tx, acl_obj, 0,
34dc7c2f
BB
3039 DMU_OBJECT_END);
3040 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3041 0, aclp->z_acl_bytes);
3042 } else {
572e2857 3043 dmu_tx_hold_write(tx, acl_obj, 0,
34dc7c2f
BB
3044 aclp->z_acl_bytes);
3045 }
428870ff 3046 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
34dc7c2f
BB
3047 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3048 0, aclp->z_acl_bytes);
3049 }
572e2857 3050 mutex_exit(&zp->z_lock);
428870ff
BB
3051 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3052 } else {
5484965a
BB
3053 if ((mask & ATTR_XVATTR) &&
3054 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3055 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3056 else
3057 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
34dc7c2f
BB
3058 }
3059
428870ff
BB
3060 if (attrzp) {
3061 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
34dc7c2f
BB
3062 }
3063
0037b49e 3064 fuid_dirtied = zfsvfs->z_fuid_dirty;
428870ff 3065 if (fuid_dirtied)
0037b49e 3066 zfs_fuid_txhold(zfsvfs, tx);
428870ff
BB
3067
3068 zfs_sa_upgrade_txholds(tx, zp);
3069
384f8a09
MA
3070 err = dmu_tx_assign(tx, TXG_WAIT);
3071 if (err)
9babb374 3072 goto out;
34dc7c2f 3073
428870ff 3074 count = 0;
34dc7c2f
BB
3075 /*
3076 * Set each attribute requested.
3077 * We group settings according to the locks they need to acquire.
3078 *
3079 * Note: you cannot set ctime directly, although it will be
3080 * updated as a side-effect of calling this function.
3081 */
3082
572e2857 3083
3558fd73 3084 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
572e2857 3085 mutex_enter(&zp->z_acl_lock);
34dc7c2f
BB
3086 mutex_enter(&zp->z_lock);
3087
0037b49e 3088 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
428870ff
BB
3089 &zp->z_pflags, sizeof (zp->z_pflags));
3090
3091 if (attrzp) {
3558fd73 3092 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
572e2857 3093 mutex_enter(&attrzp->z_acl_lock);
428870ff
BB
3094 mutex_enter(&attrzp->z_lock);
3095 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
0037b49e 3096 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
428870ff
BB
3097 sizeof (attrzp->z_pflags));
3098 }
3099
3558fd73 3100 if (mask & (ATTR_UID|ATTR_GID)) {
428870ff 3101
3558fd73 3102 if (mask & ATTR_UID) {
64aefee1
NB
3103 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3104 new_uid = zfs_uid_read(ZTOI(zp));
0037b49e 3105 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
428870ff 3106 &new_uid, sizeof (new_uid));
428870ff
BB
3107 if (attrzp) {
3108 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
0037b49e 3109 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
428870ff 3110 sizeof (new_uid));
2c6abf15 3111 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
428870ff
BB
3112 }
3113 }
3114
3558fd73 3115 if (mask & ATTR_GID) {
64aefee1
NB
3116 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3117 new_gid = zfs_gid_read(ZTOI(zp));
0037b49e 3118 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
428870ff 3119 NULL, &new_gid, sizeof (new_gid));
428870ff
BB
3120 if (attrzp) {
3121 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
0037b49e 3122 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
428870ff 3123 sizeof (new_gid));
64aefee1 3124 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
428870ff
BB
3125 }
3126 }
3558fd73 3127 if (!(mask & ATTR_MODE)) {
0037b49e 3128 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
428870ff
BB
3129 NULL, &new_mode, sizeof (new_mode));
3130 new_mode = zp->z_mode;
3131 }
3132 err = zfs_acl_chown_setattr(zp);
3133 ASSERT(err == 0);
3134 if (attrzp) {
3135 err = zfs_acl_chown_setattr(attrzp);
3136 ASSERT(err == 0);
3137 }
3138 }
3139
3558fd73 3140 if (mask & ATTR_MODE) {
0037b49e 3141 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
428870ff 3142 &new_mode, sizeof (new_mode));
12fa7f34 3143 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
99c564bc 3144 ASSERT3P(aclp, !=, NULL);
9babb374 3145 err = zfs_aclset_common(zp, aclp, cr, tx);
c99c9001 3146 ASSERT0(err);
572e2857
BB
3147 if (zp->z_acl_cached)
3148 zfs_acl_free(zp->z_acl_cached);
45d1cae3
BB
3149 zp->z_acl_cached = aclp;
3150 aclp = NULL;
34dc7c2f
BB
3151 }
3152
704cd075
CC
3153 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3154 zp->z_atime_dirty = 0;
3155 ZFS_TIME_ENCODE(&ip->i_atime, atime);
0037b49e 3156 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
0df9673f 3157 &atime, sizeof (atime));
34dc7c2f
BB
3158 }
3159
fedc1d96 3160 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
5484965a 3161 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
f79c0de2 3162 ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
87f9371a
NB
3163 ZTOI(zp)->i_sb->s_time_gran);
3164
0037b49e 3165 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
428870ff 3166 mtime, sizeof (mtime));
34dc7c2f
BB
3167 }
3168
fedc1d96 3169 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
87f9371a 3170 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
f79c0de2 3171 ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
87f9371a 3172 ZTOI(zp)->i_sb->s_time_gran);
0037b49e 3173 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
87f9371a 3174 ctime, sizeof (ctime));
428870ff 3175 }
87f9371a
NB
3176
3177 if (attrzp && mask) {
3178 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
0037b49e 3179 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
87f9371a
NB
3180 sizeof (ctime));
3181 }
3182
34dc7c2f
BB
3183 /*
3184 * Do this after setting timestamps to prevent timestamp
 3185 * update from toggling the bit
3186 */
3187
5484965a
BB
3188 if (xoap && (mask & ATTR_XVATTR)) {
3189
3190 /*
3191 * restore trimmed off masks
3192 * so that return masks can be set for caller.
3193 */
3194
f4ea75d4 3195 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
5484965a
BB
3196 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3197 }
f4ea75d4 3198 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
5484965a
BB
3199 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3200 }
f4ea75d4 3201 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
5484965a
BB
3202 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3203 }
f4ea75d4 3204 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
5484965a
BB
3205 XVA_SET_REQ(xvap, XAT_NODUMP);
3206 }
f4ea75d4 3207 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
5484965a
BB
3208 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3209 }
f4ea75d4 3210 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
5484965a
BB
3211 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3212 }
3213
3214 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3215 ASSERT(S_ISREG(ip->i_mode));
3216
3217 zfs_xvattr_set(zp, xvap, tx);
3218 }
3219
9babb374 3220 if (fuid_dirtied)
0037b49e 3221 zfs_fuid_sync(zfsvfs, tx);
9babb374 3222
34dc7c2f 3223 if (mask != 0)
5484965a 3224 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
34dc7c2f 3225
34dc7c2f 3226 mutex_exit(&zp->z_lock);
3558fd73 3227 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
572e2857 3228 mutex_exit(&zp->z_acl_lock);
34dc7c2f 3229
572e2857 3230 if (attrzp) {
3558fd73 3231 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
572e2857
BB
3232 mutex_exit(&attrzp->z_acl_lock);
3233 mutex_exit(&attrzp->z_lock);
3234 }
9babb374 3235out:
428870ff
BB
3236 if (err == 0 && attrzp) {
3237 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3238 xattr_count, tx);
3239 ASSERT(err2 == 0);
3240 }
3241
45d1cae3 3242 if (aclp)
9babb374 3243 zfs_acl_free(aclp);
9babb374
BB
3244
3245 if (fuidp) {
3246 zfs_fuid_info_free(fuidp);
3247 fuidp = NULL;
3248 }
3249
428870ff 3250 if (err) {
9babb374 3251 dmu_tx_abort(tx);
ea7e86d8
BB
3252 if (attrzp)
3253 iput(ZTOI(attrzp));
428870ff
BB
3254 if (err == ERESTART)
3255 goto top;
3256 } else {
3257 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
9babb374 3258 dmu_tx_commit(tx);
ea7e86d8
BB
3259 if (attrzp)
3260 iput(ZTOI(attrzp));
037849f8 3261 zfs_inode_update(zp);
428870ff
BB
3262 }
3263
428870ff 3264out2:
0037b49e 3265 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 3266 zil_commit(zilog, 0);
34dc7c2f 3267
f4ea75d4 3268out3:
d1d7e268
MK
3269 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
3270 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
3271 kmem_free(tmpxvattr, sizeof (xvattr_t));
0037b49e 3272 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3273 return (err);
3274}
3275
3276typedef struct zfs_zlock {
3277 krwlock_t *zl_rwlock; /* lock we acquired */
3278 znode_t *zl_znode; /* znode we held */
3279 struct zfs_zlock *zl_next; /* next in list */
3280} zfs_zlock_t;
3281
3282/*
3283 * Drop locks and release vnodes that were held by zfs_rename_lock().
3284 */
3285static void
3286zfs_rename_unlock(zfs_zlock_t **zlpp)
3287{
3288 zfs_zlock_t *zl;
3289
3290 while ((zl = *zlpp) != NULL) {
3291 if (zl->zl_znode != NULL)
ea7e86d8 3292 zfs_iput_async(ZTOI(zl->zl_znode));
34dc7c2f
BB
3293 rw_exit(zl->zl_rwlock);
3294 *zlpp = zl->zl_next;
3295 kmem_free(zl, sizeof (*zl));
3296 }
3297}
3298
3299/*
3300 * Search back through the directory tree, using the ".." entries.
3301 * Lock each directory in the chain to prevent concurrent renames.
3302 * Fail any attempt to move a directory into one of its own descendants.
3303 * XXX - z_parent_lock can overlap with map or grow locks
3304 */
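/*
 * For example, an attempted rename of /usr/a/b to /usr/a/b/c/d walks the
 * ".." chain starting at the target directory (c, then b); when the walk
 * reaches the object id of the source directory b it returns EINVAL, so
 * the rename fails before any entry is changed.
 */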
3305static int
3306zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3307{
3308 zfs_zlock_t *zl;
3309 znode_t *zp = tdzp;
3558fd73 3310 uint64_t rootid = ZTOZSB(zp)->z_root;
428870ff 3311 uint64_t oidp = zp->z_id;
34dc7c2f
BB
3312 krwlock_t *rwlp = &szp->z_parent_lock;
3313 krw_t rw = RW_WRITER;
3314
3315 /*
3316 * First pass write-locks szp and compares to zp->z_id.
3317 * Later passes read-lock zp and compare to zp->z_parent.
3318 */
3319 do {
3320 if (!rw_tryenter(rwlp, rw)) {
3321 /*
3322 * Another thread is renaming in this path.
3323 * Note that if we are a WRITER, we don't have any
3324 * parent_locks held yet.
3325 */
3326 if (rw == RW_READER && zp->z_id > szp->z_id) {
3327 /*
3328 * Drop our locks and restart
3329 */
3330 zfs_rename_unlock(&zl);
3331 *zlpp = NULL;
3332 zp = tdzp;
428870ff 3333 oidp = zp->z_id;
34dc7c2f
BB
3334 rwlp = &szp->z_parent_lock;
3335 rw = RW_WRITER;
3336 continue;
3337 } else {
3338 /*
3339 * Wait for other thread to drop its locks
3340 */
3341 rw_enter(rwlp, rw);
3342 }
3343 }
3344
3345 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3346 zl->zl_rwlock = rwlp;
3347 zl->zl_znode = NULL;
3348 zl->zl_next = *zlpp;
3349 *zlpp = zl;
3350
428870ff 3351 if (oidp == szp->z_id) /* We're a descendant of szp */
2e528b49 3352 return (SET_ERROR(EINVAL));
34dc7c2f 3353
428870ff 3354 if (oidp == rootid) /* We've hit the top */
34dc7c2f
BB
3355 return (0);
3356
3357 if (rw == RW_READER) { /* i.e. not the first pass */
3558fd73 3358 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
34dc7c2f
BB
3359 if (error)
3360 return (error);
3361 zl->zl_znode = zp;
3362 }
3558fd73 3363 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
428870ff 3364 &oidp, sizeof (oidp));
34dc7c2f
BB
3365 rwlp = &zp->z_parent_lock;
3366 rw = RW_READER;
3367
3368 } while (zp->z_id != sdzp->z_id);
3369
3370 return (0);
3371}
3372
3373/*
3374 * Move an entry from the provided source directory to the target
3375 * directory. Change the entry name as indicated.
3376 *
3558fd73 3377 * IN: sdip - Source directory containing the "old entry".
34dc7c2f 3378 * snm - Old entry name.
3558fd73 3379 * tdip - Target directory to contain the "new entry".
34dc7c2f
BB
3380 * tnm - New entry name.
3381 * cr - credentials of caller.
34dc7c2f
BB
3382 * flags - case flags
3383 *
d3cc8b15 3384 * RETURN: 0 on success, error code on failure.
34dc7c2f
BB
3385 *
3386 * Timestamps:
3558fd73 3387 * sdip,tdip - ctime|mtime updated
34dc7c2f
BB
3388 */
3389/*ARGSUSED*/
e5c39b95 3390int
3558fd73
BB
3391zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3392 cred_t *cr, int flags)
34dc7c2f
BB
3393{
3394 znode_t *tdzp, *szp, *tzp;
3558fd73 3395 znode_t *sdzp = ITOZ(sdip);
0037b49e 3396 zfsvfs_t *zfsvfs = ITOZSB(sdip);
34dc7c2f 3397 zilog_t *zilog;
34dc7c2f
BB
3398 zfs_dirlock_t *sdl, *tdl;
3399 dmu_tx_t *tx;
3400 zfs_zlock_t *zl;
3401 int cmp, serr, terr;
3402 int error = 0;
3403 int zflg = 0;
e8b96c60 3404 boolean_t waited = B_FALSE;
34dc7c2f 3405
32dec7bd 3406 if (snm == NULL || tnm == NULL)
3407 return (SET_ERROR(EINVAL));
3408
0037b49e 3409 ZFS_ENTER(zfsvfs);
34dc7c2f 3410 ZFS_VERIFY_ZP(sdzp);
0037b49e 3411 zilog = zfsvfs->z_log;
34dc7c2f 3412
812e91a7
MT
3413 tdzp = ITOZ(tdip);
3414 ZFS_VERIFY_ZP(tdzp);
3415
3416 /*
3417 * We check i_sb because snapshots and the ctldir must have different
3418 * super blocks.
3419 */
c0ebc844 3420 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
0037b49e 3421 ZFS_EXIT(zfsvfs);
2e528b49 3422 return (SET_ERROR(EXDEV));
34dc7c2f
BB
3423 }
3424
0037b49e 3425 if (zfsvfs->z_utf8 && u8_validate(tnm,
34dc7c2f 3426 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
0037b49e 3427 ZFS_EXIT(zfsvfs);
2e528b49 3428 return (SET_ERROR(EILSEQ));
34dc7c2f
BB
3429 }
3430
3431 if (flags & FIGNORECASE)
3432 zflg |= ZCILOOK;
3433
3434top:
3435 szp = NULL;
3436 tzp = NULL;
3437 zl = NULL;
3438
3439 /*
3440 * This is to prevent the creation of links into attribute space
 3441 * by renaming a linked file into/out of an attribute directory.
3442 * See the comment in zfs_link() for why this is considered bad.
3443 */
428870ff 3444 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
0037b49e 3445 ZFS_EXIT(zfsvfs);
2e528b49 3446 return (SET_ERROR(EINVAL));
34dc7c2f
BB
3447 }
3448
3449 /*
3450 * Lock source and target directory entries. To prevent deadlock,
3451 * a lock ordering must be defined. We lock the directory with
3452 * the smallest object id first, or if it's a tie, the one with
3453 * the lexically first name.
3454 */
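 /*
  * For example (hypothetical object ids): if the source directory is
  * object 9 and the target is object 5, the target entry is locked first;
  * concurrent renames between the same two directories therefore always
  * take the locks in the same order and cannot deadlock against each
  * other.
  */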
3455 if (sdzp->z_id < tdzp->z_id) {
3456 cmp = -1;
3457 } else if (sdzp->z_id > tdzp->z_id) {
3458 cmp = 1;
3459 } else {
3460 /*
3461 * First compare the two name arguments without
3462 * considering any case folding.
3463 */
0037b49e 3464 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
34dc7c2f
BB
3465
3466 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
0037b49e 3467 ASSERT(error == 0 || !zfsvfs->z_utf8);
34dc7c2f
BB
3468 if (cmp == 0) {
3469 /*
3470 * POSIX: "If the old argument and the new argument
3471 * both refer to links to the same existing file,
3472 * the rename() function shall return successfully
3473 * and perform no other action."
3474 */
0037b49e 3475 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3476 return (0);
3477 }
3478 /*
3479 * If the file system is case-folding, then we may
3480 * have some more checking to do. A case-folding file
3481 * system is either supporting mixed case sensitivity
3482 * access or is completely case-insensitive. Note
3483 * that the file system is always case preserving.
3484 *
3485 * In mixed sensitivity mode case sensitive behavior
3486 * is the default. FIGNORECASE must be used to
3487 * explicitly request case insensitive behavior.
3488 *
3489 * If the source and target names provided differ only
3490 * by case (e.g., a request to rename 'tim' to 'Tim'),
3491 * we will treat this as a special case in the
3492 * case-insensitive mode: as long as the source name
3493 * is an exact match, we will allow this to proceed as
3494 * a name-change request.
3495 */
0037b49e
BB
3496 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3497 (zfsvfs->z_case == ZFS_CASE_MIXED &&
34dc7c2f 3498 flags & FIGNORECASE)) &&
0037b49e 3499 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
34dc7c2f
BB
3500 &error) == 0) {
3501 /*
3502 * case preserving rename request, require exact
3503 * name matches
3504 */
3505 zflg |= ZCIEXACT;
3506 zflg &= ~ZCILOOK;
3507 }
3508 }
3509
428870ff
BB
3510 /*
3511 * If the source and destination directories are the same, we should
3512 * grab the z_name_lock of that directory only once.
3513 */
3514 if (sdzp == tdzp) {
3515 zflg |= ZHAVELOCK;
3516 rw_enter(&sdzp->z_name_lock, RW_READER);
3517 }
3518
34dc7c2f
BB
3519 if (cmp < 0) {
3520 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3521 ZEXISTS | zflg, NULL, NULL);
3522 terr = zfs_dirent_lock(&tdl,
3523 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3524 } else {
3525 terr = zfs_dirent_lock(&tdl,
3526 tdzp, tnm, &tzp, zflg, NULL, NULL);
3527 serr = zfs_dirent_lock(&sdl,
3528 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3529 NULL, NULL);
3530 }
3531
3532 if (serr) {
3533 /*
3534 * Source entry invalid or not there.
3535 */
3536 if (!terr) {
3537 zfs_dirent_unlock(tdl);
3538 if (tzp)
3558fd73 3539 iput(ZTOI(tzp));
34dc7c2f 3540 }
428870ff
BB
3541
3542 if (sdzp == tdzp)
3543 rw_exit(&sdzp->z_name_lock);
3544
34dc7c2f
BB
3545 if (strcmp(snm, "..") == 0)
3546 serr = EINVAL;
0037b49e 3547 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3548 return (serr);
3549 }
3550 if (terr) {
3551 zfs_dirent_unlock(sdl);
3558fd73 3552 iput(ZTOI(szp));
428870ff
BB
3553
3554 if (sdzp == tdzp)
3555 rw_exit(&sdzp->z_name_lock);
3556
34dc7c2f
BB
3557 if (strcmp(tnm, "..") == 0)
3558 terr = EINVAL;
0037b49e 3559 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3560 return (terr);
3561 }
3562
3563 /*
3564 * Must have write access at the source to remove the old entry
3565 * and write access at the target to create the new entry.
3566 * Note that if target and source are the same, this can be
3567 * done in a single check.
3568 */
3569
149e873a 3570 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
34dc7c2f
BB
3571 goto out;
3572
3558fd73 3573 if (S_ISDIR(ZTOI(szp)->i_mode)) {
34dc7c2f
BB
3574 /*
3575 * Check to make sure rename is valid.
3576 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3577 */
149e873a 3578 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
34dc7c2f
BB
3579 goto out;
3580 }
3581
3582 /*
3583 * Does target exist?
3584 */
3585 if (tzp) {
3586 /*
3587 * Source and target must be the same type.
3588 */
3558fd73
BB
3589 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3590 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
2e528b49 3591 error = SET_ERROR(ENOTDIR);
34dc7c2f
BB
3592 goto out;
3593 }
3594 } else {
3558fd73 3595 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
2e528b49 3596 error = SET_ERROR(EISDIR);
34dc7c2f
BB
3597 goto out;
3598 }
3599 }
3600 /*
3601 * POSIX dictates that when the source and target
3602 * entries refer to the same file object, rename
3603 * must do nothing and exit without error.
3604 */
3605 if (szp->z_id == tzp->z_id) {
3606 error = 0;
3607 goto out;
3608 }
3609 }
3610
0037b49e 3611 tx = dmu_tx_create(zfsvfs->z_os);
428870ff
BB
3612 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3613 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
34dc7c2f
BB
3614 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3615 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
428870ff
BB
3616 if (sdzp != tdzp) {
3617 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3618 zfs_sa_upgrade_txholds(tx, tdzp);
3619 }
3620 if (tzp) {
3621 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3622 zfs_sa_upgrade_txholds(tx, tzp);
3623 }
3624
3625 zfs_sa_upgrade_txholds(tx, szp);
0037b49e 3626 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
ef7a7948 3627 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
3628 if (error) {
3629 if (zl != NULL)
3630 zfs_rename_unlock(&zl);
3631 zfs_dirent_unlock(sdl);
3632 zfs_dirent_unlock(tdl);
428870ff
BB
3633
3634 if (sdzp == tdzp)
3635 rw_exit(&sdzp->z_name_lock);
3636
fb5f0bc8 3637 if (error == ERESTART) {
e8b96c60 3638 waited = B_TRUE;
34dc7c2f
BB
3639 dmu_tx_wait(tx);
3640 dmu_tx_abort(tx);
ea7e86d8
BB
3641 iput(ZTOI(szp));
3642 if (tzp)
3643 iput(ZTOI(tzp));
34dc7c2f
BB
3644 goto top;
3645 }
3646 dmu_tx_abort(tx);
ea7e86d8
BB
3647 iput(ZTOI(szp));
3648 if (tzp)
3649 iput(ZTOI(tzp));
0037b49e 3650 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3651 return (error);
3652 }
3653
3654 if (tzp) /* Attempt to remove the existing target */
3655 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3656
3657 if (error == 0) {
3658 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3659 if (error == 0) {
428870ff 3660 szp->z_pflags |= ZFS_AV_MODIFIED;
34dc7c2f 3661
0037b49e 3662 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
428870ff 3663 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
c99c9001 3664 ASSERT0(error);
34dc7c2f 3665
428870ff
BB
3666 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3667 if (error == 0) {
3668 zfs_log_rename(zilog, tx, TX_RENAME |
572e2857
BB
3669 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3670 sdl->dl_name, tdzp, tdl->dl_name, szp);
428870ff
BB
3671 } else {
3672 /*
3673 * At this point, we have successfully created
3674 * the target name, but have failed to remove
3675 * the source name. Since the create was done
3676 * with the ZRENAMING flag, there are
3677 * complications; for one, the link count is
3678 * wrong. The easiest way to deal with this
3679 * is to remove the newly created target, and
3680 * return the original error. This must
3681 * succeed; fortunately, it is very unlikely to
3682 * fail, since we just created it.
3683 */
3684 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3685 ZRENAMING, NULL), ==, 0);
3686 }
34dc7c2f
BB
3687 }
3688 }
3689
3690 dmu_tx_commit(tx);
3691out:
3692 if (zl != NULL)
3693 zfs_rename_unlock(&zl);
3694
3695 zfs_dirent_unlock(sdl);
3696 zfs_dirent_unlock(tdl);
3697
960e08fe 3698 zfs_inode_update(sdzp);
428870ff
BB
3699 if (sdzp == tdzp)
3700 rw_exit(&sdzp->z_name_lock);
3701
960e08fe
BB
3702 if (sdzp != tdzp)
3703 zfs_inode_update(tdzp);
428870ff 3704
960e08fe 3705 zfs_inode_update(szp);
3558fd73 3706 iput(ZTOI(szp));
960e08fe
BB
3707 if (tzp) {
3708 zfs_inode_update(tzp);
3558fd73 3709 iput(ZTOI(tzp));
960e08fe 3710 }
34dc7c2f 3711
0037b49e 3712 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 3713 zil_commit(zilog, 0);
428870ff 3714
0037b49e 3715 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3716 return (error);
3717}
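/*
 * Illustrative user-space sketch (not part of this file): the POSIX
 * "links to the same file" rule handled above can be observed with
 * link(2) and rename(2). Paths under /tank are hypothetical. Because
 * both names reference the same object, the rename() call succeeds
 * without removing either link.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		(void) close(creat("/tank/a", 0644));
 *		(void) link("/tank/a", "/tank/b");
 *		if (rename("/tank/a", "/tank/b") == 0)
 *			(void) printf("no-op rename: both links remain\n");
 *		return (0);
 *	}
 */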
3718
3719/*
3720 * Insert the indicated symbolic reference entry into the directory.
3721 *
3558fd73 3722 * IN: dip - Directory to contain new symbolic link.
34dc7c2f
BB
3723 * name - Name of new symlink entry.
3724 * vap - Attributes of new entry.
3725 * link - Target path of new symlink.
3558fd73 3726 *
34dc7c2f 3727 * cr - credentials of caller.
34dc7c2f
BB
3728 * flags - case flags
3729 *
d3cc8b15 3730 * RETURN: 0 on success, error code on failure.
34dc7c2f
BB
3731 *
3732 * Timestamps:
3558fd73 3733 * dip - ctime|mtime updated
34dc7c2f
BB
3734 */
3735/*ARGSUSED*/
e5c39b95 3736int
3558fd73
BB
3737zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3738 struct inode **ipp, cred_t *cr, int flags)
34dc7c2f 3739{
3558fd73 3740 znode_t *zp, *dzp = ITOZ(dip);
34dc7c2f
BB
3741 zfs_dirlock_t *dl;
3742 dmu_tx_t *tx;
0037b49e 3743 zfsvfs_t *zfsvfs = ITOZSB(dip);
34dc7c2f 3744 zilog_t *zilog;
428870ff 3745 uint64_t len = strlen(link);
34dc7c2f
BB
3746 int error;
3747 int zflg = ZNEW;
9babb374
BB
3748 zfs_acl_ids_t acl_ids;
3749 boolean_t fuid_dirtied;
428870ff 3750 uint64_t txtype = TX_SYMLINK;
e8b96c60 3751 boolean_t waited = B_FALSE;
34dc7c2f 3752
3558fd73 3753 ASSERT(S_ISLNK(vap->va_mode));
34dc7c2f 3754
32dec7bd 3755 if (name == NULL)
3756 return (SET_ERROR(EINVAL));
3757
0037b49e 3758 ZFS_ENTER(zfsvfs);
34dc7c2f 3759 ZFS_VERIFY_ZP(dzp);
0037b49e 3760 zilog = zfsvfs->z_log;
34dc7c2f 3761
0037b49e 3762 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
34dc7c2f 3763 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
0037b49e 3764 ZFS_EXIT(zfsvfs);
2e528b49 3765 return (SET_ERROR(EILSEQ));
34dc7c2f
BB
3766 }
3767 if (flags & FIGNORECASE)
3768 zflg |= ZCILOOK;
34dc7c2f
BB
3769
3770 if (len > MAXPATHLEN) {
0037b49e 3771 ZFS_EXIT(zfsvfs);
2e528b49 3772 return (SET_ERROR(ENAMETOOLONG));
34dc7c2f
BB
3773 }
3774
428870ff
BB
3775 if ((error = zfs_acl_ids_create(dzp, 0,
3776 vap, cr, NULL, &acl_ids)) != 0) {
0037b49e 3777 ZFS_EXIT(zfsvfs);
428870ff
BB
3778 return (error);
3779 }
3780top:
3558fd73
BB
3781 *ipp = NULL;
3782
34dc7c2f
BB
3783 /*
3784 * Attempt to lock directory; fail if entry already exists.
3785 */
3786 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3787 if (error) {
428870ff 3788 zfs_acl_ids_free(&acl_ids);
0037b49e 3789 ZFS_EXIT(zfsvfs);
428870ff
BB
3790 return (error);
3791 }
3792
149e873a 3793 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
428870ff
BB
3794 zfs_acl_ids_free(&acl_ids);
3795 zfs_dirent_unlock(dl);
0037b49e 3796 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3797 return (error);
3798 }
3799
0037b49e 3800 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
9babb374
BB
3801 zfs_acl_ids_free(&acl_ids);
3802 zfs_dirent_unlock(dl);
0037b49e 3803 ZFS_EXIT(zfsvfs);
2e528b49 3804 return (SET_ERROR(EDQUOT));
9babb374 3805 }
0037b49e
BB
3806 tx = dmu_tx_create(zfsvfs->z_os);
3807 fuid_dirtied = zfsvfs->z_fuid_dirty;
34dc7c2f 3808 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
34dc7c2f 3809 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
428870ff
BB
3810 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3811 ZFS_SA_BASE_ATTR_SIZE + len);
3812 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
0037b49e 3813 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
428870ff
BB
3814 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3815 acl_ids.z_aclp->z_acl_bytes);
3816 }
9babb374 3817 if (fuid_dirtied)
0037b49e 3818 zfs_fuid_txhold(zfsvfs, tx);
ef7a7948 3819 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
3820 if (error) {
3821 zfs_dirent_unlock(dl);
fb5f0bc8 3822 if (error == ERESTART) {
e8b96c60 3823 waited = B_TRUE;
34dc7c2f
BB
3824 dmu_tx_wait(tx);
3825 dmu_tx_abort(tx);
3826 goto top;
3827 }
428870ff 3828 zfs_acl_ids_free(&acl_ids);
34dc7c2f 3829 dmu_tx_abort(tx);
0037b49e 3830 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3831 return (error);
3832 }
3833
34dc7c2f
BB
3834 /*
3835 * Create a new object for the symlink.
428870ff 3836 * For version 4 ZPL datasets the symlink will be an SA attribute
34dc7c2f 3837 */
428870ff 3838 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
9babb374 3839
428870ff 3840 if (fuid_dirtied)
0037b49e 3841 zfs_fuid_sync(zfsvfs, tx);
34dc7c2f 3842
572e2857 3843 mutex_enter(&zp->z_lock);
428870ff 3844 if (zp->z_is_sa)
0037b49e 3845 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
428870ff
BB
3846 link, len, tx);
3847 else
3848 zfs_sa_symlink(zp, link, len, tx);
572e2857 3849 mutex_exit(&zp->z_lock);
34dc7c2f 3850
428870ff 3851 zp->z_size = len;
0037b49e 3852 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
428870ff 3853 &zp->z_size, sizeof (zp->z_size), tx);
34dc7c2f
BB
3854 /*
3855 * Insert the new object into the directory.
3856 */
9a2e90c9 3857 (void) zfs_link_create(dl, zp, tx, ZNEW);
9babb374 3858
9a2e90c9
TH
3859 if (flags & FIGNORECASE)
3860 txtype |= TX_CI;
3861 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3862
3863 zfs_inode_update(dzp);
3864 zfs_inode_update(zp);
960e08fe 3865
9babb374 3866 zfs_acl_ids_free(&acl_ids);
34dc7c2f
BB
3867
3868 dmu_tx_commit(tx);
3869
3870 zfs_dirent_unlock(dl);
3871
9a2e90c9 3872 *ipp = ZTOI(zp);
34dc7c2f 3873
9a2e90c9
TH
3874 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3875 zil_commit(zilog, 0);
428870ff 3876
0037b49e 3877 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3878 return (error);
3879}
3880
3881/*
3882 * Return, in the buffer contained in the provided uio structure,
3558fd73 3883 * the symbolic path referred to by ip.
34dc7c2f 3884 *
8b4f9a2d
BB
3885 * IN: ip - inode of symbolic link
3886 * uio - structure to contain the link path.
3887 * cr - credentials of caller.
34dc7c2f
BB
3888 *
3889 * RETURN: 0 if success
3890 * error code if failure
3891 *
3892 * Timestamps:
3558fd73 3893 * ip - atime updated
34dc7c2f
BB
3894 */
3895/* ARGSUSED */
e5c39b95 3896int
8b4f9a2d 3897zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
34dc7c2f 3898{
3558fd73 3899 znode_t *zp = ITOZ(ip);
0037b49e 3900 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
3901 int error;
3902
0037b49e 3903 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
3904 ZFS_VERIFY_ZP(zp);
3905
572e2857 3906 mutex_enter(&zp->z_lock);
428870ff 3907 if (zp->z_is_sa)
8b4f9a2d 3908 error = sa_lookup_uio(zp->z_sa_hdl,
0037b49e 3909 SA_ZPL_SYMLINK(zfsvfs), uio);
428870ff 3910 else
8b4f9a2d 3911 error = zfs_sa_readlink(zp, uio);
572e2857 3912 mutex_exit(&zp->z_lock);
34dc7c2f 3913
0037b49e 3914 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
3915 return (error);
3916}
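/*
 * Illustrative user-space sketch (not part of this file) of the two
 * operations above: zfs_symlink() stores the target path (on version 4
 * ZPL datasets it is kept as an SA attribute) and zfs_readlink() copies
 * it back out. Paths under /tank are hypothetical; note that
 * readlink(2) does not NUL-terminate the buffer it fills in.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *
 *		(void) symlink("/tank/data/file", "/tank/link");
 *		n = readlink("/tank/link", buf, sizeof (buf) - 1);
 *		if (n >= 0) {
 *			buf[n] = '\0';
 *			(void) printf("%s\n", buf);
 *		}
 *		return (0);
 *	}
 */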
3917
3918/*
3558fd73 3919 * Insert a new entry into directory tdip referencing sip.
34dc7c2f 3920 *
3558fd73
BB
3921 * IN: tdip - Directory to contain new entry.
3922 * sip - inode of new entry.
34dc7c2f
BB
3923 * name - name of new entry.
3924 * cr - credentials of caller.
34dc7c2f
BB
3925 *
3926 * RETURN: 0 if success
3927 * error code if failure
3928 *
3929 * Timestamps:
3558fd73
BB
3930 * tdip - ctime|mtime updated
3931 * sip - ctime updated
34dc7c2f
BB
3932 */
3933/* ARGSUSED */
e5c39b95 3934int
da5e151f
BB
3935zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
3936 int flags)
34dc7c2f 3937{
3558fd73 3938 znode_t *dzp = ITOZ(tdip);
34dc7c2f 3939 znode_t *tzp, *szp;
0037b49e 3940 zfsvfs_t *zfsvfs = ITOZSB(tdip);
34dc7c2f
BB
3941 zilog_t *zilog;
3942 zfs_dirlock_t *dl;
3943 dmu_tx_t *tx;
34dc7c2f
BB
3944 int error;
3945 int zf = ZNEW;
428870ff 3946 uint64_t parent;
572e2857 3947 uid_t owner;
e8b96c60 3948 boolean_t waited = B_FALSE;
ace1eae8
CC
3949 boolean_t is_tmpfile = 0;
3950 uint64_t txg;
3951#ifdef HAVE_TMPFILE
3952 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
3953#endif
3558fd73 3954 ASSERT(S_ISDIR(tdip->i_mode));
34dc7c2f 3955
32dec7bd 3956 if (name == NULL)
3957 return (SET_ERROR(EINVAL));
3958
0037b49e 3959 ZFS_ENTER(zfsvfs);
34dc7c2f 3960 ZFS_VERIFY_ZP(dzp);
0037b49e 3961 zilog = zfsvfs->z_log;
34dc7c2f 3962
428870ff
BB
3963 /*
3964 * POSIX dictates that we return EPERM here.
3965 * Better choices include ENOTSUP or EISDIR.
3966 */
3558fd73 3967 if (S_ISDIR(sip->i_mode)) {
0037b49e 3968 ZFS_EXIT(zfsvfs);
2e528b49 3969 return (SET_ERROR(EPERM));
428870ff
BB
3970 }
3971
812e91a7
MT
3972 szp = ITOZ(sip);
3973 ZFS_VERIFY_ZP(szp);
3974
3975 /*
3976 * We check i_sb because snapshots and the ctldir must have different
3977 * super blocks.
3978 */
c0ebc844 3979 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
0037b49e 3980 ZFS_EXIT(zfsvfs);
2e528b49 3981 return (SET_ERROR(EXDEV));
34dc7c2f 3982 }
428870ff 3983
428870ff
BB
3984 /* Prevent links to .zfs/shares files */
3985
0037b49e 3986 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
428870ff 3987 &parent, sizeof (uint64_t))) != 0) {
0037b49e 3988 ZFS_EXIT(zfsvfs);
428870ff
BB
3989 return (error);
3990 }
0037b49e
BB
3991 if (parent == zfsvfs->z_shares_dir) {
3992 ZFS_EXIT(zfsvfs);
2e528b49 3993 return (SET_ERROR(EPERM));
428870ff
BB
3994 }
3995
0037b49e 3996 if (zfsvfs->z_utf8 && u8_validate(name,
34dc7c2f 3997 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
0037b49e 3998 ZFS_EXIT(zfsvfs);
2e528b49 3999 return (SET_ERROR(EILSEQ));
34dc7c2f
BB
4000 }
4001 if (flags & FIGNORECASE)
4002 zf |= ZCILOOK;
4003
34dc7c2f
BB
4004 /*
4005 * We do not support links between attributes and non-attributes
4006 * because of the potential security risk of creating links
4007 * into "normal" file space in order to circumvent restrictions
4008 * imposed in attribute space.
4009 */
428870ff 4010 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
0037b49e 4011 ZFS_EXIT(zfsvfs);
2e528b49 4012 return (SET_ERROR(EINVAL));
34dc7c2f
BB
4013 }
4014
0037b49e
BB
4015 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4016 cr, ZFS_OWNER);
572e2857 4017 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
0037b49e 4018 ZFS_EXIT(zfsvfs);
2e528b49 4019 return (SET_ERROR(EPERM));
34dc7c2f
BB
4020 }
4021
149e873a 4022 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
0037b49e 4023 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4024 return (error);
4025 }
4026
428870ff 4027top:
34dc7c2f
BB
4028 /*
4029 * Attempt to lock directory; fail if entry already exists.
4030 */
4031 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4032 if (error) {
0037b49e 4033 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4034 return (error);
4035 }
4036
0037b49e 4037 tx = dmu_tx_create(zfsvfs->z_os);
428870ff 4038 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
34dc7c2f 4039 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
ace1eae8 4040 if (is_tmpfile)
0037b49e 4041 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
ace1eae8 4042
428870ff
BB
4043 zfs_sa_upgrade_txholds(tx, szp);
4044 zfs_sa_upgrade_txholds(tx, dzp);
ef7a7948 4045 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
34dc7c2f
BB
4046 if (error) {
4047 zfs_dirent_unlock(dl);
fb5f0bc8 4048 if (error == ERESTART) {
e8b96c60 4049 waited = B_TRUE;
34dc7c2f
BB
4050 dmu_tx_wait(tx);
4051 dmu_tx_abort(tx);
4052 goto top;
4053 }
4054 dmu_tx_abort(tx);
0037b49e 4055 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4056 return (error);
4057 }
ace1eae8
CC
4058 /* unmark z_unlinked so zfs_link_create() will not reject it */
4059 if (is_tmpfile)
4060 szp->z_unlinked = 0;
34dc7c2f
BB
4061 error = zfs_link_create(dl, szp, tx, 0);
4062
4063 if (error == 0) {
4064 uint64_t txtype = TX_LINK;
ace1eae8
CC
4065 /*
4066 * tmpfile is created to be in z_unlinkedobj, so remove it.
4067 * Also, we don't log in the ZIL, because all previous file
4068 * operations on the tmpfile are ignored by the ZIL. Instead we
4069 * always wait for the txg to sync to make sure all previous
4070 * operations are sync safe.
4071 */
4072 if (is_tmpfile) {
0037b49e
BB
4073 VERIFY(zap_remove_int(zfsvfs->z_os,
4074 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
ace1eae8
CC
4075 } else {
4076 if (flags & FIGNORECASE)
4077 txtype |= TX_CI;
4078 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4079 }
4080 } else if (is_tmpfile) {
4081 /* restore z_unlinked since linking failed */
4082 szp->z_unlinked = 1;
34dc7c2f 4083 }
ace1eae8 4084 txg = dmu_tx_get_txg(tx);
34dc7c2f
BB
4085 dmu_tx_commit(tx);
4086
4087 zfs_dirent_unlock(dl);
4088
0037b49e 4089 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 4090 zil_commit(zilog, 0);
428870ff 4091
ace1eae8 4092 if (is_tmpfile)
0037b49e 4093 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
ace1eae8 4094
960e08fe
BB
4095 zfs_inode_update(dzp);
4096 zfs_inode_update(szp);
0037b49e 4097 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4098 return (error);
4099}
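/*
 * Illustrative user-space sketch (not part of this file) of the
 * is_tmpfile path above: a file created with O_TMPFILE starts out on
 * the unlinked set and only gains a name once it is linked in, here via
 * the /proc/self/fd + AT_SYMLINK_FOLLOW idiom described in open(2).
 * Paths under /tank are hypothetical and HAVE_TMPFILE support is
 * assumed.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char path[64];
 *		int fd;
 *
 *		fd = open("/tank/dir", O_TMPFILE | O_WRONLY, 0644);
 *		if (fd < 0)
 *			return (1);
 *		(void) dprintf(fd, "staged data\n");
 *		(void) snprintf(path, sizeof (path), "/proc/self/fd/%d", fd);
 *		(void) linkat(AT_FDCWD, path, AT_FDCWD, "/tank/dir/file",
 *		    AT_SYMLINK_FOLLOW);
 *		(void) close(fd);
 *		return (0);
 *	}
 */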
4100
3c0e5c0f 4101static void
119a394a 4102zfs_putpage_commit_cb(void *arg)
3c0e5c0f
BB
4103{
4104 struct page *pp = arg;
4105
119a394a 4106 ClearPageError(pp);
3c0e5c0f
BB
4107 end_page_writeback(pp);
4108}
4109
34dc7c2f 4110/*
3c0e5c0f
BB
4111 * Push a page out to disk, once the page is on stable storage the
4112 * registered commit callback will be run as notification of completion.
34dc7c2f 4113 *
3c0e5c0f
BB
4114 * IN: ip - inode of file the page belongs to.
4115 * pp - page to push (page is locked)
4116 * wbc - writeback control data
34dc7c2f
BB
4117 *
4118 * RETURN: 0 if success
4119 * error code if failure
4120 *
3c0e5c0f
BB
4121 * Timestamps:
4122 * ip - ctime|mtime updated
34dc7c2f
BB
4123 */
4124/* ARGSUSED */
3c0e5c0f
BB
4125int
4126zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
34dc7c2f 4127{
3c0e5c0f 4128 znode_t *zp = ITOZ(ip);
0037b49e 4129 zfsvfs_t *zfsvfs = ITOZSB(ip);
3c0e5c0f
BB
4130 loff_t offset;
4131 loff_t pgoff;
4c837f0d
BB
4132 unsigned int pglen;
4133 rl_t *rl;
3c0e5c0f
BB
4134 dmu_tx_t *tx;
4135 caddr_t va;
4136 int err = 0;
4137 uint64_t mtime[2], ctime[2];
4138 sa_bulk_attr_t bulk[3];
4139 int cnt = 0;
21a96fb6 4140 struct address_space *mapping;
3c0e5c0f 4141
0037b49e 4142 ZFS_ENTER(zfsvfs);
4c837f0d 4143 ZFS_VERIFY_ZP(zp);
d164b209 4144
3c0e5c0f
BB
4145 ASSERT(PageLocked(pp));
4146
d1d7e268
MK
4147 pgoff = page_offset(pp); /* Page byte-offset in file */
4148 offset = i_size_read(ip); /* File length in bytes */
8b1899d3
BB
4149 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4150 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
3c0e5c0f
BB
4151
4152 /* Page is beyond end of file */
4153 if (pgoff >= offset) {
4154 unlock_page(pp);
0037b49e 4155 ZFS_EXIT(zfsvfs);
3c0e5c0f
BB
4156 return (0);
4157 }
4158
4159 /* Truncate page length to end of file */
4160 if (pgoff + pglen > offset)
4161 pglen = offset - pgoff;
4162
4163#if 0
34dc7c2f 4164 /*
3c0e5c0f
BB
4165 * FIXME: mmap writes are currently allowed past the quota. The
4166 * correct fix is to register a page_mkwrite() handler to count
4167 * the page against the quota when it is about to be dirtied.
34dc7c2f 4168 */
0037b49e
BB
4169 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4170 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
9babb374 4171 err = EDQUOT;
9babb374 4172 }
3c0e5c0f
BB
4173#endif
4174
d958324f
BB
4175 /*
4176 * The ordering here is critical and must adhere to the following
4177 * rules in order to avoid deadlocking in either zfs_read() or
4178 * zfs_free_range() due to a lock inversion.
4179 *
4180 * 1) The page must be unlocked prior to acquiring the range lock.
4181 * This is critical because zfs_read() calls find_lock_page()
4182 * which may block on the page lock while holding the range lock.
4183 *
4184 * 2) Before setting or clearing write back on a page the range lock
4185 * must be held in order to prevent a lock inversion with the
4186 * zfs_free_range() function.
21a96fb6
CC
4187 *
4188 * This presents a problem because upon entering this function the
4189 * page lock is already held. To safely acquire the range lock the
4190 * page lock must be dropped. This creates a window where another
4191 * process could truncate, invalidate, dirty, or write out the page.
4192 *
4193 * Therefore, after successfully reacquiring the range and page locks
4194 * the current page state is checked. In the common case everything
4195 * will be as expected and it can be written out. However, if
4196 * the page state has changed it must be handled accordingly.
d958324f 4197 */
21a96fb6
CC
4198 mapping = pp->mapping;
4199 redirty_page_for_writepage(wbc, pp);
d958324f 4200 unlock_page(pp);
21a96fb6 4201
d88895a0 4202 rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
21a96fb6
CC
4203 lock_page(pp);
4204
4205 /* Page mapping changed or it was no longer dirty, we're done */
4206 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4207 unlock_page(pp);
4208 zfs_range_unlock(rl);
0037b49e 4209 ZFS_EXIT(zfsvfs);
21a96fb6
CC
4210 return (0);
4211 }
4212
4213 /* Another process started writeback; block on it if required */
4214 if (PageWriteback(pp)) {
4215 unlock_page(pp);
4216 zfs_range_unlock(rl);
4217
4218 if (wbc->sync_mode != WB_SYNC_NONE)
4219 wait_on_page_writeback(pp);
4220
0037b49e 4221 ZFS_EXIT(zfsvfs);
21a96fb6
CC
4222 return (0);
4223 }
4224
4225 /* Clear the dirty flag now that the required locks are held */
4226 if (!clear_page_dirty_for_io(pp)) {
4227 unlock_page(pp);
4228 zfs_range_unlock(rl);
0037b49e 4229 ZFS_EXIT(zfsvfs);
21a96fb6
CC
4230 return (0);
4231 }
4232
4233 /*
4234 * Counterpart for redirty_page_for_writepage() above. This page
4235 * was in fact not skipped and should not be counted as if it were.
4236 */
4237 wbc->pages_skipped--;
3c0e5c0f 4238 set_page_writeback(pp);
21a96fb6 4239 unlock_page(pp);
3c0e5c0f 4240
0037b49e 4241 tx = dmu_tx_create(zfsvfs->z_os);
3c0e5c0f 4242 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
428870ff
BB
4243 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4244 zfs_sa_upgrade_txholds(tx, zp);
d958324f 4245
fb5f0bc8 4246 err = dmu_tx_assign(tx, TXG_NOWAIT);
34dc7c2f 4247 if (err != 0) {
3c0e5c0f 4248 if (err == ERESTART)
34dc7c2f 4249 dmu_tx_wait(tx);
3c0e5c0f 4250
34dc7c2f 4251 dmu_tx_abort(tx);
119a394a
ED
4252 __set_page_dirty_nobuffers(pp);
4253 ClearPageError(pp);
4254 end_page_writeback(pp);
4c837f0d 4255 zfs_range_unlock(rl);
0037b49e 4256 ZFS_EXIT(zfsvfs);
3c0e5c0f 4257 return (err);
34dc7c2f
BB
4258 }
4259
dde471ef 4260 va = kmap(pp);
8b1899d3 4261 ASSERT3U(pglen, <=, PAGE_SIZE);
0037b49e 4262 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
dde471ef 4263 kunmap(pp);
34dc7c2f 4264
0037b49e
BB
4265 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4266 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4267 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4268 &zp->z_pflags, 8);
428870ff 4269
d3aa3ea9
BB
4270 /* Preserve the mtime and ctime provided by the inode */
4271 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4272 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4273 zp->z_atime_dirty = 0;
4274 zp->z_seq++;
4275
4276 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4277
0037b49e 4278 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
119a394a 4279 zfs_putpage_commit_cb, pp);
45d1cae3 4280 dmu_tx_commit(tx);
d3aa3ea9 4281
4c837f0d 4282 zfs_range_unlock(rl);
34dc7c2f 4283
119a394a
ED
4284 if (wbc->sync_mode != WB_SYNC_NONE) {
4285 /*
4286 * Note that this is rarely called under writepages(), because
4287 * writepages() normally handles the entire commit for
4288 * performance reasons.
4289 */
0037b49e 4290 zil_commit(zfsvfs->z_log, zp->z_id);
2b286136 4291 }
3c0e5c0f 4292
0037b49e 4293 ZFS_EXIT(zfsvfs);
3c0e5c0f 4294 return (err);
34dc7c2f
BB
4295}
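/*
 * Illustrative user-space sketch (not part of this file): a dirtied
 * shared mapping flushed with msync(MS_SYNC) is one way the writeback
 * path above is exercised, since the synchronous flush drives the
 * address-space writepage operations for the dirty page. The path
 * /tank/file is hypothetical and assumed to be at least one page long.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char *p;
 *		int fd = open("/tank/file", O_RDWR);
 *
 *		if (fd < 0)
 *			return (1);
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, 0);
 *		if (p == MAP_FAILED)
 *			return (1);
 *		(void) memcpy(p, "dirty", 5);
 *		(void) msync(p, 4096, MS_SYNC);
 *		(void) munmap(p, 4096);
 *		(void) close(fd);
 *		return (0);
 *	}
 */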
4296
8780c539
BB
4297/*
4298 * Update the system attributes when the inode has been dirtied. For the
023699cd 4299 * moment we only update the mode, atime, mtime, and ctime.
8780c539
BB
4300 */
4301int
4302zfs_dirty_inode(struct inode *ip, int flags)
4303{
4304 znode_t *zp = ITOZ(ip);
0037b49e 4305 zfsvfs_t *zfsvfs = ITOZSB(ip);
8780c539 4306 dmu_tx_t *tx;
023699cd
MM
4307 uint64_t mode, atime[2], mtime[2], ctime[2];
4308 sa_bulk_attr_t bulk[4];
704cd075 4309 int error = 0;
8780c539
BB
4310 int cnt = 0;
4311
0037b49e 4312 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
c944be5d
BB
4313 return (0);
4314
0037b49e 4315 ZFS_ENTER(zfsvfs);
8780c539
BB
4316 ZFS_VERIFY_ZP(zp);
4317
704cd075
CC
4318#ifdef I_DIRTY_TIME
4319 /*
4320 * This is the lazytime semantic introduced in Linux 4.0.
4321 * This path is only taken from update_time() when lazytime is set.
4322 * (Note that I_DIRTY_SYNC will also be set if lazytime is not in effect.)
4323 * Fortunately mtime and ctime are managed within ZFS itself, so we
4324 * only need to dirty atime.
4325 */
4326 if (flags == I_DIRTY_TIME) {
4327 zp->z_atime_dirty = 1;
4328 goto out;
4329 }
4330#endif
4331
0037b49e 4332 tx = dmu_tx_create(zfsvfs->z_os);
8780c539
BB
4333
4334 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4335 zfs_sa_upgrade_txholds(tx, zp);
4336
4337 error = dmu_tx_assign(tx, TXG_WAIT);
4338 if (error) {
4339 dmu_tx_abort(tx);
4340 goto out;
4341 }
4342
4343 mutex_enter(&zp->z_lock);
704cd075
CC
4344 zp->z_atime_dirty = 0;
4345
0037b49e
BB
4346 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4347 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4348 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4349 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
8780c539 4350
023699cd 4351 /* Preserve the mode, mtime and ctime provided by the inode */
8780c539
BB
4352 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4353 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4354 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
023699cd
MM
4355 mode = ip->i_mode;
4356
4357 zp->z_mode = mode;
8780c539
BB
4358
4359 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4360 mutex_exit(&zp->z_lock);
4361
4362 dmu_tx_commit(tx);
4363out:
0037b49e 4364 ZFS_EXIT(zfsvfs);
8780c539
BB
4365 return (error);
4366}
8780c539 4367
34dc7c2f
BB
4368/*ARGSUSED*/
4369void
c0d35759 4370zfs_inactive(struct inode *ip)
34dc7c2f 4371{
c0d35759 4372 znode_t *zp = ITOZ(ip);
0037b49e 4373 zfsvfs_t *zfsvfs = ITOZSB(ip);
0df9673f 4374 uint64_t atime[2];
34dc7c2f 4375 int error;
cafbd2ac 4376 int need_unlock = 0;
34dc7c2f 4377
cafbd2ac 4378 /* Only read lock if we haven't already write locked, e.g. rollback */
0037b49e 4379 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
cafbd2ac 4380 need_unlock = 1;
0037b49e 4381 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
cafbd2ac 4382 }
c0d35759 4383 if (zp->z_sa_hdl == NULL) {
cafbd2ac 4384 if (need_unlock)
0037b49e 4385 rw_exit(&zfsvfs->z_teardown_inactive_lock);
c0d35759 4386 return;
34dc7c2f
BB
4387 }
4388
4389 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
0037b49e 4390 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
34dc7c2f 4391
428870ff
BB
4392 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4393 zfs_sa_upgrade_txholds(tx, zp);
34dc7c2f
BB
4394 error = dmu_tx_assign(tx, TXG_WAIT);
4395 if (error) {
4396 dmu_tx_abort(tx);
4397 } else {
0df9673f 4398 ZFS_TIME_ENCODE(&ip->i_atime, atime);
34dc7c2f 4399 mutex_enter(&zp->z_lock);
0037b49e 4400 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
0df9673f 4401 (void *)&atime, sizeof (atime), tx);
34dc7c2f
BB
4402 zp->z_atime_dirty = 0;
4403 mutex_exit(&zp->z_lock);
4404 dmu_tx_commit(tx);
4405 }
4406 }
4407
4408 zfs_zinactive(zp);
cafbd2ac 4409 if (need_unlock)
0037b49e 4410 rw_exit(&zfsvfs->z_teardown_inactive_lock);
34dc7c2f
BB
4411}
4412
4413/*
4414 * Bounds-check the seek operation.
4415 *
3558fd73 4416 * IN: ip - inode seeking within
34dc7c2f
BB
4417 * ooff - old file offset
4418 * noffp - pointer to new file offset
4419 * ct - caller context
4420 *
4421 * RETURN: 0 if success
4422 * EINVAL if new offset invalid
4423 */
4424/* ARGSUSED */
3558fd73 4425int
9623f736 4426zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
34dc7c2f 4427{
3558fd73 4428 if (S_ISDIR(ip->i_mode))
34dc7c2f
BB
4429 return (0);
4430 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4431}
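/*
 * Illustrative user-space sketch (not part of this file): a seek that
 * would leave the file offset negative fails with EINVAL, the same
 * bounds rule enforced above. The path /tank/file is hypothetical.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = open("/tank/file", O_RDONLY);
 *
 *		if (fd < 0)
 *			return (1);
 *		if (lseek(fd, (off_t)-1, SEEK_SET) == (off_t)-1)
 *			(void) printf("lseek: %s\n", strerror(errno));
 *		(void) close(fd);
 *		return (0);
 *	}
 */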
4432
34dc7c2f 4433/*
dde471ef 4434 * Fill pages with data from the disk.
34dc7c2f
BB
4435 */
4436static int
dde471ef 4437zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
34dc7c2f 4438{
d1d7e268 4439 znode_t *zp = ITOZ(ip);
0037b49e 4440 zfsvfs_t *zfsvfs = ITOZSB(ip);
d1d7e268 4441 objset_t *os;
dde471ef 4442 struct page *cur_pp;
d1d7e268
MK
4443 u_offset_t io_off, total;
4444 size_t io_len;
4445 loff_t i_size;
4446 unsigned page_idx;
4447 int err;
34dc7c2f 4448
0037b49e 4449 os = zfsvfs->z_os;
8b1899d3 4450 io_len = nr_pages << PAGE_SHIFT;
dde471ef
PJ
4451 i_size = i_size_read(ip);
4452 io_off = page_offset(pl[0]);
4453
4454 if (io_off + io_len > i_size)
4455 io_len = i_size - io_off;
34dc7c2f
BB
4456
4457 /*
dde471ef 4458 * Iterate over list of pages and read each page individually.
34dc7c2f 4459 */
dde471ef 4460 page_idx = 0;
34dc7c2f 4461 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
d164b209
BB
4462 caddr_t va;
4463
540c3927 4464 cur_pp = pl[page_idx++];
dde471ef 4465 va = kmap(cur_pp);
9babb374
BB
4466 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4467 DMU_READ_PREFETCH);
dde471ef 4468 kunmap(cur_pp);
34dc7c2f 4469 if (err) {
b128c09f
BB
4470 /* convert checksum errors into IO errors */
4471 if (err == ECKSUM)
2e528b49 4472 err = SET_ERROR(EIO);
34dc7c2f
BB
4473 return (err);
4474 }
34dc7c2f 4475 }
d164b209 4476
34dc7c2f
BB
4477 return (0);
4478}
4479
4480/*
dde471ef 4481 * Uses zfs_fillpage to read data from the file and fill the pages.
34dc7c2f 4482 *
dde471ef
PJ
4483 * IN: ip - inode of file to get data from.
4484 * pl - list of pages to read
4485 * nr_pages - number of pages to read
34dc7c2f 4486 *
d3cc8b15 4487 * RETURN: 0 on success, error code on failure.
34dc7c2f
BB
4488 *
4489 * Timestamps:
4491 * ip - atime updated
4491 */
4492/* ARGSUSED */
dde471ef
PJ
4493int
4494zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
34dc7c2f 4495{
dde471ef 4496 znode_t *zp = ITOZ(ip);
0037b49e 4497 zfsvfs_t *zfsvfs = ITOZSB(ip);
dde471ef 4498 int err;
d164b209 4499
d164b209
BB
4500 if (pl == NULL)
4501 return (0);
34dc7c2f 4502
0037b49e 4503 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
4504 ZFS_VERIFY_ZP(zp);
4505
dde471ef 4506 err = zfs_fillpage(ip, pl, nr_pages);
34dc7c2f 4507
0037b49e 4508 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4509 return (err);
4510}
4511
4512/*
e2e7aa2d 4513 * Check ZFS specific permissions to memory map a section of a file.
34dc7c2f 4514 *
e2e7aa2d
BB
4515 * IN: ip - inode of the file to mmap
4516 * off - file offset
4517 * addrp - start address in memory region
4518 * len - length of memory region
4519 * vm_flags - address flags
34dc7c2f 4520 *
e2e7aa2d
BB
4521 * RETURN: 0 if success
4522 * error code if failure
34dc7c2f
BB
4523 */
4524/*ARGSUSED*/
e2e7aa2d
BB
4525int
4526zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4527 unsigned long vm_flags)
34dc7c2f 4528{
e2e7aa2d 4529 znode_t *zp = ITOZ(ip);
0037b49e 4530 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f 4531
0037b49e 4532 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
4533 ZFS_VERIFY_ZP(zp);
4534
e2e7aa2d 4535 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
428870ff 4536 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
0037b49e 4537 ZFS_EXIT(zfsvfs);
2e528b49 4538 return (SET_ERROR(EPERM));
34dc7c2f
BB
4539 }
4540
e2e7aa2d 4541 if ((vm_flags & (VM_READ | VM_EXEC)) &&
428870ff 4542 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
0037b49e 4543 ZFS_EXIT(zfsvfs);
2e528b49 4544 return (SET_ERROR(EACCES));
34dc7c2f
BB
4545 }
4546
34dc7c2f 4547 if (off < 0 || len > MAXOFFSET_T - off) {
0037b49e 4548 ZFS_EXIT(zfsvfs);
2e528b49 4549 return (SET_ERROR(ENXIO));
34dc7c2f
BB
4550 }
4551
0037b49e 4552 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4553 return (0);
4554}
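/*
 * Illustrative user-space sketch (not part of this file): per the
 * checks above, a writable MAP_SHARED mapping of a file carrying the
 * immutable, readonly, or append-only attribute is expected to be
 * refused with EPERM. The path /tank/appendlog is hypothetical and is
 * assumed to already have the append-only attribute set.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		void *p;
 *		int fd = open("/tank/appendlog", O_RDWR | O_APPEND);
 *
 *		if (fd < 0)
 *			return (1);
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, 0);
 *		if (p == MAP_FAILED)
 *			(void) printf("mmap: %s\n", strerror(errno));
 *		else
 *			(void) munmap(p, 4096);
 *		(void) close(fd);
 *		return (0);
 *	}
 */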
4555
3558fd73
BB
4556/*
4557 * convoff - convert the given lock range (l_start, l_whence) so
4558 * that l_start is expressed relative to the given whence.
4559 */
4560int
4561convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4562{
5484965a 4563 vattr_t vap;
3558fd73
BB
4564 int error;
4565
4566 if ((lckdat->l_whence == 2) || (whence == 2)) {
80cc2f61 4567 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
3558fd73
BB
4568 return (error);
4569 }
4570
4571 switch (lckdat->l_whence) {
4572 case 1:
4573 lckdat->l_start += offset;
4574 break;
4575 case 2:
5484965a 4576 lckdat->l_start += vap.va_size;
3558fd73
BB
4577 /* FALLTHRU */
4578 case 0:
4579 break;
4580 default:
2e528b49 4581 return (SET_ERROR(EINVAL));
3558fd73
BB
4582 }
4583
4584 if (lckdat->l_start < 0)
2e528b49 4585 return (SET_ERROR(EINVAL));
3558fd73
BB
4586
4587 switch (whence) {
4588 case 1:
4589 lckdat->l_start -= offset;
4590 break;
4591 case 2:
5484965a 4592 lckdat->l_start -= vap.va_size;
3558fd73
BB
4593 /* FALLTHRU */
4594 case 0:
4595 break;
4596 default:
2e528b49 4597 return (SET_ERROR(EINVAL));
3558fd73
BB
4598 }
4599
4600 lckdat->l_whence = (short)whence;
4601 return (0);
4602}
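/*
 * Minimal standalone sketch (not part of this file) of the l_whence
 * arithmetic above for the common "convert to whence 0" case: an
 * l_start relative to the current offset (1) or to end of file (2)
 * becomes an absolute offset by adding the offset or the file size.
 * The sample values are hypothetical; with a 4096-byte file, -128
 * relative to EOF resolves to 3968.
 *
 *	#include <stdio.h>
 *
 *	static long long
 *	to_absolute(long long l_start, int l_whence, long long cur,
 *	    long long size)
 *	{
 *		if (l_whence == 1)
 *			l_start += cur;
 *		else if (l_whence == 2)
 *			l_start += size;
 *		return (l_start);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		(void) printf("%lld\n", to_absolute(-128, 2, 0, 4096));
 *		return (0);
 *	}
 */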
4603
34dc7c2f
BB
4604/*
4605 * Free or allocate space in a file. Currently, this function only
4606 * supports the `F_FREESP' command. However, this command is somewhat
4607 * misnamed, as its functionality includes the ability to allocate as
4608 * well as free space.
4609 *
3558fd73 4610 * IN: ip - inode of file to free data in.
34dc7c2f
BB
4611 * cmd - action to take (only F_FREESP supported).
4612 * bfp - section of file to free/alloc.
4613 * flag - current file open mode flags.
4614 * offset - current file offset.
4615 * cr - credentials of caller [UNUSED].
34dc7c2f 4616 *
d3cc8b15 4617 * RETURN: 0 on success, error code on failure.
34dc7c2f
BB
4618 *
4619 * Timestamps:
3558fd73 4620 * ip - ctime|mtime updated
34dc7c2f
BB
4621 */
4622/* ARGSUSED */
e5c39b95 4623int
3558fd73
BB
4624zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4625 offset_t offset, cred_t *cr)
34dc7c2f 4626{
3558fd73 4627 znode_t *zp = ITOZ(ip);
0037b49e 4628 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
4629 uint64_t off, len;
4630 int error;
4631
0037b49e 4632 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
4633 ZFS_VERIFY_ZP(zp);
4634
34dc7c2f 4635 if (cmd != F_FREESP) {
0037b49e 4636 ZFS_EXIT(zfsvfs);
2e528b49 4637 return (SET_ERROR(EINVAL));
34dc7c2f
BB
4638 }
4639
f3c9dca0
MT
4640 /*
4641 * Callers might not be able to detect properly that we are read-only,
4642 * so check it explicitly here.
4643 */
0037b49e
BB
4644 if (zfs_is_readonly(zfsvfs)) {
4645 ZFS_EXIT(zfsvfs);
f3c9dca0
MT
4646 return (SET_ERROR(EROFS));
4647 }
4648
3558fd73 4649 if ((error = convoff(ip, bfp, 0, offset))) {
0037b49e 4650 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4651 return (error);
4652 }
4653
4654 if (bfp->l_len < 0) {
0037b49e 4655 ZFS_EXIT(zfsvfs);
2e528b49 4656 return (SET_ERROR(EINVAL));
34dc7c2f
BB
4657 }
4658
aec69371
ED
4659 /*
4660 * Permissions aren't checked on Solaris because there
4661 * zfs_space() can only be called with an opened file handle.
4662 * On Linux we can get here through truncate_range() which
4663 * operates directly on inodes, so we need to check access rights.
4664 */
4665 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
0037b49e 4666 ZFS_EXIT(zfsvfs);
aec69371
ED
4667 return (error);
4668 }
4669
34dc7c2f
BB
4670 off = bfp->l_start;
4671 len = bfp->l_len; /* 0 means from off to end of file */
4672
b128c09f 4673 error = zfs_freesp(zp, off, len, flag, TRUE);
34dc7c2f 4674
0037b49e 4675 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4676 return (error);
4677}
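/*
 * Illustrative user-space sketch (not part of this file): on Linux the
 * F_FREESP handling above is typically reached by punching a hole with
 * fallocate(2). The path /tank/file and the 64 KiB offset / 128 KiB
 * length are hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = open("/tank/file", O_RDWR);
 *
 *		if (fd < 0)
 *			return (1);
 *		(void) fallocate(fd,
 *		    FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		    65536, 131072);
 *		(void) close(fd);
 *		return (0);
 *	}
 */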
4678
4679/*ARGSUSED*/
e5c39b95 4680int
3558fd73 4681zfs_fid(struct inode *ip, fid_t *fidp)
34dc7c2f 4682{
3558fd73 4683 znode_t *zp = ITOZ(ip);
0037b49e 4684 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f 4685 uint32_t gen;
428870ff 4686 uint64_t gen64;
34dc7c2f
BB
4687 uint64_t object = zp->z_id;
4688 zfid_short_t *zfid;
428870ff 4689 int size, i, error;
34dc7c2f 4690
0037b49e 4691 ZFS_ENTER(zfsvfs);
34dc7c2f 4692 ZFS_VERIFY_ZP(zp);
428870ff 4693
0037b49e 4694 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
428870ff 4695 &gen64, sizeof (uint64_t))) != 0) {
0037b49e 4696 ZFS_EXIT(zfsvfs);
428870ff
BB
4697 return (error);
4698 }
4699
4700 gen = (uint32_t)gen64;
34dc7c2f 4701
9b77d1c9 4702 size = SHORT_FID_LEN;
34dc7c2f
BB
4703
4704 zfid = (zfid_short_t *)fidp;
4705
4706 zfid->zf_len = size;
4707
4708 for (i = 0; i < sizeof (zfid->zf_object); i++)
4709 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4710
4711 /* Must have a non-zero generation number to distinguish from .zfs */
4712 if (gen == 0)
4713 gen = 1;
4714 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4715 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4716
0037b49e 4717 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4718 return (0);
4719}
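/*
 * Minimal standalone sketch (not part of this file) that reverses the
 * byte packing above, assuming the zfid_short_t layout of a 6-byte
 * little-endian object number and a 4-byte little-endian generation.
 * The sample bytes are hypothetical and decode to object 42, gen 1.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void
 *	unpack_fid(const uint8_t obj[6], const uint8_t gen[4],
 *	    uint64_t *objectp, uint32_t *genp)
 *	{
 *		int i;
 *
 *		*objectp = 0;
 *		for (i = 0; i < 6; i++)
 *			*objectp |= (uint64_t)obj[i] << (8 * i);
 *		*genp = 0;
 *		for (i = 0; i < 4; i++)
 *			*genp |= (uint32_t)gen[i] << (8 * i);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		uint8_t obj[6] = { 0x2a, 0, 0, 0, 0, 0 };
 *		uint8_t gen[4] = { 0x01, 0, 0, 0 };
 *		uint64_t object;
 *		uint32_t g;
 *
 *		unpack_fid(obj, gen, &object, &g);
 *		(void) printf("object %llu gen %u\n",
 *		    (unsigned long long)object, g);
 *		return (0);
 *	}
 */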
4720
34dc7c2f 4721/*ARGSUSED*/
e5c39b95 4722int
3558fd73 4723zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
34dc7c2f 4724{
3558fd73 4725 znode_t *zp = ITOZ(ip);
0037b49e 4726 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
4727 int error;
4728 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4729
0037b49e 4730 ZFS_ENTER(zfsvfs);
34dc7c2f
BB
4731 ZFS_VERIFY_ZP(zp);
4732 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
0037b49e 4733 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4734
4735 return (error);
4736}
4737
4738/*ARGSUSED*/
e5c39b95 4739int
3558fd73 4740zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
34dc7c2f 4741{
3558fd73 4742 znode_t *zp = ITOZ(ip);
0037b49e 4743 zfsvfs_t *zfsvfs = ITOZSB(ip);
34dc7c2f
BB
4744 int error;
4745 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
0037b49e 4746 zilog_t *zilog = zfsvfs->z_log;
34dc7c2f 4747
0037b49e 4748 ZFS_ENTER(zfsvfs);
34dc7c2f 4749 ZFS_VERIFY_ZP(zp);
428870ff 4750
34dc7c2f 4751 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
428870ff 4752
0037b49e 4753 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
572e2857 4754 zil_commit(zilog, 0);
428870ff 4755
0037b49e 4756 ZFS_EXIT(zfsvfs);
34dc7c2f
BB
4757 return (error);
4758}
4759
3558fd73 4760#ifdef HAVE_UIO_ZEROCOPY
428870ff
BB
4761/*
4762 * Tunables; both must be a power of 2.
4763 *
4764 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4765 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
3558fd73 4766 * an arcbuf for a partial block read
428870ff
BB
4767 */
4768int zcr_blksz_min = (1 << 10); /* 1K */
4769int zcr_blksz_max = (1 << 17); /* 128K */
4770
4771/*ARGSUSED*/
4772static int
3558fd73 4773zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
428870ff 4774{
3558fd73 4775 znode_t *zp = ITOZ(ip);
0037b49e
BB
4776 zfsvfs_t *zfsvfs = ITOZSB(ip);
4777 int max_blksz = zfsvfs->z_max_blksz;
428870ff
BB
4778 uio_t *uio = &xuio->xu_uio;
4779 ssize_t size = uio->uio_resid;
4780 offset_t offset = uio->uio_loffset;
4781 int blksz;
4782 int fullblk, i;
4783 arc_buf_t *abuf;
4784 ssize_t maxsize;
4785 int preamble, postamble;
4786
4787 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
2e528b49 4788 return (SET_ERROR(EINVAL));
428870ff 4789
0037b49e 4790 ZFS_ENTER(zfsvfs);
428870ff
BB
4791 ZFS_VERIFY_ZP(zp);
4792 switch (ioflag) {
4793 case UIO_WRITE:
4794 /*
4795 * Loan out an arc_buf for write if write size is bigger than
4796 * max_blksz, and the file's block size is also max_blksz.
4797 */
4798 blksz = max_blksz;
4799 if (size < blksz || zp->z_blksz != blksz) {
0037b49e 4800 ZFS_EXIT(zfsvfs);
2e528b49 4801 return (SET_ERROR(EINVAL));
428870ff
BB
4802 }
4803 /*
4804 * Caller requests buffers for write before knowing where the
4805 * write offset might be (e.g. NFS TCP write).
4806 */
4807 if (offset == -1) {
4808 preamble = 0;
4809 } else {
4810 preamble = P2PHASE(offset, blksz);
4811 if (preamble) {
4812 preamble = blksz - preamble;
4813 size -= preamble;
4814 }
4815 }
4816
4817 postamble = P2PHASE(size, blksz);
4818 size -= postamble;
4819
4820 fullblk = size / blksz;
4821 (void) dmu_xuio_init(xuio,
4822 (preamble != 0) + fullblk + (postamble != 0));
428870ff
BB
4823
4824 /*
4825 * Have to fix iov base/len for partial buffers. They
4826 * currently represent full arc_buf's.
4827 */
4828 if (preamble) {
4829 /* data begins in the middle of the arc_buf */
4830 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4831 blksz);
4832 ASSERT(abuf);
4833 (void) dmu_xuio_add(xuio, abuf,
4834 blksz - preamble, preamble);
4835 }
4836
4837 for (i = 0; i < fullblk; i++) {
4838 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4839 blksz);
4840 ASSERT(abuf);
4841 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4842 }
4843
4844 if (postamble) {
4845 /* data ends in the middle of the arc_buf */
4846 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4847 blksz);
4848 ASSERT(abuf);
4849 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4850 }
4851 break;
4852 case UIO_READ:
4853 /*
4854 * Loan out an arc_buf for read if the read size is larger than
4855 * the current file block size. Block alignment is not
4856 * considered. A partial arc_buf will be loaned out for the read.
4857 */
4858 blksz = zp->z_blksz;
4859 if (blksz < zcr_blksz_min)
4860 blksz = zcr_blksz_min;
4861 if (blksz > zcr_blksz_max)
4862 blksz = zcr_blksz_max;
4863 /* avoid potential complexity of dealing with it */
4864 if (blksz > max_blksz) {
0037b49e 4865 ZFS_EXIT(zfsvfs);
2e528b49 4866 return (SET_ERROR(EINVAL));
428870ff
BB
4867 }
4868
4869 maxsize = zp->z_size - uio->uio_loffset;
4870 if (size > maxsize)
4871 size = maxsize;
4872
3558fd73 4873 if (size < blksz) {
0037b49e 4874 ZFS_EXIT(zfsvfs);
2e528b49 4875 return (SET_ERROR(EINVAL));
428870ff
BB
4876 }
4877 break;
4878 default:
0037b49e 4879 ZFS_EXIT(zfsvfs);
2e528b49 4880 return (SET_ERROR(EINVAL));
428870ff
BB
4881 }
4882
4883 uio->uio_extflg = UIO_XUIO;
4884 XUIO_XUZC_RW(xuio) = ioflag;
0037b49e 4885 ZFS_EXIT(zfsvfs);
428870ff
BB
4886 return (0);
4887}
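/*
 * Minimal standalone sketch (not part of this file) of the UIO_WRITE
 * preamble/postamble split above, using the P2PHASE identity
 * x & (align - 1) for a power-of-two block size. The sample values
 * (offset 70000, size 300000, 128K block size) are hypothetical and
 * split into a 61072-byte preamble, one full block, and a 107856-byte
 * postamble.
 *
 *	#include <stdio.h>
 *
 *	static void
 *	split(long long offset, long long size, long long blksz)
 *	{
 *		long long preamble = offset & (blksz - 1);
 *		long long postamble;
 *
 *		if (preamble != 0) {
 *			preamble = blksz - preamble;
 *			size -= preamble;
 *		}
 *		postamble = size & (blksz - 1);
 *		size -= postamble;
 *		(void) printf("preamble %lld, full blocks %lld, "
 *		    "postamble %lld\n", preamble, size / blksz, postamble);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		split(70000, 300000, 131072);
 *		return (0);
 *	}
 */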
4888
4889/*ARGSUSED*/
4890static int
3558fd73 4891zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
428870ff
BB
4892{
4893 int i;
4894 arc_buf_t *abuf;
4895 int ioflag = XUIO_XUZC_RW(xuio);
4896
4897 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4898
4899 i = dmu_xuio_cnt(xuio);
4900 while (i-- > 0) {
4901 abuf = dmu_xuio_arcbuf(xuio, i);
4902 /*
4903 * if abuf == NULL, it must be a write buffer
4904 * that has been returned in zfs_write().
4905 */
4906 if (abuf)
4907 dmu_return_arcbuf(abuf);
4908 ASSERT(abuf || ioflag == UIO_WRITE);
4909 }
4910
4911 dmu_xuio_fini(xuio);
4912 return (0);
4913}
3558fd73 4914#endif /* HAVE_UIO_ZEROCOPY */
c409e464
BB
4915
4916#if defined(_KERNEL) && defined(HAVE_SPL)
f298b24d
BB
4917EXPORT_SYMBOL(zfs_open);
4918EXPORT_SYMBOL(zfs_close);
4919EXPORT_SYMBOL(zfs_read);
4920EXPORT_SYMBOL(zfs_write);
4921EXPORT_SYMBOL(zfs_access);
4922EXPORT_SYMBOL(zfs_lookup);
4923EXPORT_SYMBOL(zfs_create);
4924EXPORT_SYMBOL(zfs_tmpfile);
4925EXPORT_SYMBOL(zfs_remove);
4926EXPORT_SYMBOL(zfs_mkdir);
4927EXPORT_SYMBOL(zfs_rmdir);
4928EXPORT_SYMBOL(zfs_readdir);
4929EXPORT_SYMBOL(zfs_fsync);
4930EXPORT_SYMBOL(zfs_getattr);
4931EXPORT_SYMBOL(zfs_getattr_fast);
4932EXPORT_SYMBOL(zfs_setattr);
4933EXPORT_SYMBOL(zfs_rename);
4934EXPORT_SYMBOL(zfs_symlink);
4935EXPORT_SYMBOL(zfs_readlink);
4936EXPORT_SYMBOL(zfs_link);
4937EXPORT_SYMBOL(zfs_inactive);
4938EXPORT_SYMBOL(zfs_space);
4939EXPORT_SYMBOL(zfs_fid);
4940EXPORT_SYMBOL(zfs_getsecattr);
4941EXPORT_SYMBOL(zfs_setsecattr);
4942EXPORT_SYMBOL(zfs_getpage);
4943EXPORT_SYMBOL(zfs_putpage);
4944EXPORT_SYMBOL(zfs_dirty_inode);
4945EXPORT_SYMBOL(zfs_map);
4946
02730c33 4947/* CSTYLED */
a966c564
K
4948module_param(zfs_delete_blocks, ulong, 0644);
4949MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
c409e464
BB
4950module_param(zfs_read_chunk_size, long, 0644);
4951MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
4952#endif