/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */


#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <vm/pvn.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/mode.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>
#include <sys/zpl.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done in a race-free way using ZFS_ENTER(zsb).
 *	A ZFS_EXIT(zsb) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can cause the calling function to return EIO.
 *
 *  (2) iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use zfs_iput_async().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign().  This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *	calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zsb);			// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zsb);		// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zsb);			// finished in zfs
 *	return (error);			// done, report error
 */

/*
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially by invoking a user-space
 * scanning utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
	return (0);
}

/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EPERM));
	}

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {
			ZFS_EXIT(zsb);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_open);

/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_close);

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA).  "off" is an in/out parameter.
 */
static int
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(ip, cmd, off);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_holey);
#endif /* SEEK_HOLE && SEEK_DATA */
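
/*
 * Illustrative sketch (not part of the original file): the hole/data
 * support above is reached from user space through lseek(2).  A minimal
 * caller, assuming a hypothetical sparse file "/tank/sparse", might look
 * like this:
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		off_t data, hole;
 *		int fd = open("/tank/sparse", O_RDONLY);
 *
 *		if (fd == -1)
 *			return (1);
 *		data = lseek(fd, 0, SEEK_DATA);	   // first data at/after 0
 *		hole = lseek(fd, data, SEEK_HOLE); // first hole after that
 *		printf("data at %lld, hole at %lld\n",
 *		    (long long)data, (long long)hole);
 *		close(fd);
 *		return (0);
 *	}
 *
 * Per zfs_holey_common() above, seeking at or beyond EOF (or SEEK_DATA
 * inside the trailing hole) fails with ENXIO.
 */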

#if defined(_KERNEL)
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	uint64_t nbytes;
	int64_t	off;
	void *pb;

	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		nbytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, oid, start+off, nbytes, pb+off,
			    DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			SetPageUptodate(pp);
			ClearPageError(pp);
			unlock_page(pp);
			page_cache_release(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		otherwise we read from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	znode_t	*zp = ITOZ(ip);
	objset_t *os = ITOZSB(ip)->z_os;
	int64_t	start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;

	start = uio->uio_loffset;
	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		bytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));

			pb = kmap(pp);
			error = uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			unlock_page(pp);
			page_cache_release(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
#endif /* _KERNEL */
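
/*
 * Illustrative sketch (not part of the original file): the
 * update_pages()/mappedread() pair above is what keeps a MAP_SHARED
 * mapping and write(2) on the same file coherent.  A hedged user-space
 * view, assuming a hypothetical file "/tank/f" at least one page long:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <assert.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = open("/tank/f", O_RDWR);
 *		char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *
 *		assert(fd != -1 && p != MAP_FAILED);
 *		(void) pwrite(fd, "x", 1, 0); // copied into the cached page
 *					      // by update_pages()
 *		assert(p[0] == 'x');	      // mapping observes the write
 *		munmap(p, 4096);
 *		close(fd);
 *		return (0);
 *	}
 */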

unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
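
/*
 * Sketch (assumption, not verbatim from this file): like most ZFS
 * tunables, zfs_read_chunk_size is expected to be exposed as a module
 * parameter, in which case it can be inspected and changed at runtime
 * from the shell:
 *
 *	# cat /sys/module/zfs/parameters/zfs_read_chunk_size
 *	1048576
 *	# echo 2097152 > /sys/module/zfs/parameters/zfs_read_chunk_size
 */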

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	objset_t	*os;
	ssize_t		n, nbytes;
	int		error = 0;
	rl_t		*rl;
#ifdef HAVE_UIO_ZEROCOPY
	xuio_t		*xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	os = zsb->z_os;

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zsb);
		return (0);
	}

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zsb->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if ((ISP2(blksz))) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */

	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT))
			error = mappedread(ip, nbytes, uio);
		else
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_read);

/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	rlim64_t	limit = uio->uio_limit;
	ssize_t		start_resid = uio->uio_resid;
	ssize_t		tx_bytes;
	uint64_t	end_size;
	dmu_tx_t	*tx;
	zfs_sb_t	*zsb = ZTOZSB(zp);
	zilog_t		*zilog;
	offset_t	woff;
	ssize_t		n, nbytes;
	rl_t		*rl;
	int		max_blksz = zsb->z_max_blksz;
	int		error = 0;
	arc_buf_t	*abuf;
	iovec_t		*aiov = NULL;
	xuio_t		*xuio = NULL;
	int		i_iov = 0;
	iovec_t		*iovp = uio->uio_iov;
	int		write_eof;
	int		count = 0;
	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];
	ASSERTV(int	iovcnt = uio->uio_iovcnt);

	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EPERM));
	}

	zilog = zsb->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
#endif
		uio_prefaultpages(MIN(n, max_blksz), uio);

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zsb);
		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		abuf = NULL;
		woff = uio->uio_loffset;
		if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
		    zfs_owner_overquota(zsb, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = SET_ERROR(EDQUOT);
			break;
		}

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zsb->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zsb->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}

		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT))
			update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id);

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non-zero then force
		 * the file size to the specified eof.  Note, there's no
		 * concurrency during replay.
		 */
		if (zsb->z_replay && zsb->z_replay_eof != 0)
			zp->z_size = zsb->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zsb->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_write);

void
zfs_iput_async(struct inode *ip)
{
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	if (atomic_read(&ip->i_count) == 1)
		taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
		    (task_func_t *)iput, ip, TQ_PUSHPAGE);
	else
		iput(ip);
}

void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_iput_async(ZTOI(zp));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

#ifdef DEBUG
static int zil_fault_io = 0;
#endif

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfs_sb_t *zsb = arg;
	objset_t *os = zsb->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zsb, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_iput_async(ZTOI(zp));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
	zgd->zgd_zilog = zsb->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure that while it's
		 * written out and its checksum is being calculated
		 * no one can change the data.  We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			    RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}

/*ARGSUSED*/
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_access);

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	NA
 */
/* ARGSUSED */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	int error = 0;

	/* fast path */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
#ifdef HAVE_DNLC
		} else {
			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					iput(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					iput(tvp);
					return (SET_ERROR(ENOENT));
				} else {
					*vpp = tvp;
					return (specvp_check(vpp, cr));
				}
			}
#endif /* HAVE_DNLC */
		}
	}

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zsb);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zsb);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */

		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zsb);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */

	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_lookup);

/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */

/* ARGSUSED */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	os = zsb->z_os;
	zilog = zsb->z_log;

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		igrab(dip);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */

		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zsb->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zsb, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zsb->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}
		error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zsb);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		if (fuid_dirtied)
			zfs_fuid_sync(zsb, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);
			dl = NULL;
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_create);

/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

/*ARGSUSED*/
int
zfs_remove(struct inode *dip, char *name, cred_t *cr)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	znode_t		*xzp;
	struct inode	*ip;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	uint64_t	xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;
	uint64_t	obj = 0;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	boolean_t	unlinked;
	uint64_t	txtype;
	pathname_t	*realnmp = NULL;
#ifdef HAVE_PN_UTILS
	pathname_t	realnm;
#endif /* HAVE_PN_UTILS */
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}
#endif /* HAVE_PN_UTILS */

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		error = SET_ERROR(EPERM);
		goto out;
	}

#ifdef HAVE_DNLC
	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

	/*
	 * We never delete the znode and always place it in the unlinked
	 * set.  The dentry cache will always hold the last reference and
	 * is responsible for safely freeing the znode.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zsb, xattr_obj, &xzp);
		ASSERT0(error);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		iput(ip);
		if (xzp)
			iput(ZTOI(xzp));
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
#endif /* HAVE_PN_UTILS */
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
#ifdef HAVE_PN_UTILS
	if (realnmp)
		pn_free(realnmp);
#endif /* HAVE_PN_UTILS */

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	if (xzp)
		zfs_inode_update(xzp);

	iput(ip);
	if (xzp)
		iput(ZTOI(xzp));

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_remove);

/*
 * Create a new directory and insert it into dip using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	waited = B_FALSE;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	uid = crgetuid(cr);
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	if (zsb->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*ipp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zsb);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	*ipp = ZTOI(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_mkdir);

/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
    int flags)
{
	znode_t		*dzp = ITOZ(dip);
	znode_t		*zp;
	struct inode	*ip;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	if (!S_ISDIR(ip->i_mode)) {
		error = SET_ERROR(ENOTDIR);
		goto out;
	}

	if (ip == cwd) {
		error = SET_ERROR(EINVAL);
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		iput(ip);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	iput(ip);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_rmdir);

/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
/* ARGSUSED */
int
zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	objset_t	*os;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	int		error;
	uint8_t		prefetch;
	uint8_t		type;
	int		done = 0;
	uint64_t	parent;
	uint64_t	offset; /* must be unsigned; checks for < 1 */

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	if (zp->z_unlinked)
		goto out;

	error = 0;
	os = zsb->z_os;
	offset = ctx->pos;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
			type = DT_DIR;
		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
			type = DT_DIR;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
			type = DT_DIR;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			/*
			 * Allow multiple entries provided the first entry is
			 * the object id.  Non-zpl consumers may safely make
			 * use of the additional space.
			 *
			 * XXX: This should be a feature flag for compatibility
			 */
			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers == 0) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld, "
				    "length = %d, num = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset,
				    zap.za_integer_length,
				    (u_longlong_t)zap.za_num_integers);
				error = SET_ERROR(ENXIO);
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			type = ZFS_DIRENT_TYPE(zap.za_first_integer);
		}

		done = !dir_emit(ctx, zap.za_name, strlen(zap.za_name),
		    objnum, type);
		if (done)
			break;

		/* Prefetch znode */
		if (prefetch) {
			dmu_prefetch(os, objnum, 0, 0);
		}

		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}
		ctx->pos = offset;
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	if (error == ENOENT)
		error = 0;

	ZFS_ACCESSTIME_STAMP(zsb, zp);

out:
	ZFS_EXIT(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_readdir);
2142
2143 ulong_t zfs_fsync_sync_cnt = 4;
2144
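/*
 * Flush all dirty data for the file to stable storage by committing
 * its intent log records, unless synchronous semantics are disabled
 * for the dataset.
 *
 * IN: ip - inode of file to sync.
 * syncflag - sync flags (currently unused).
 * cr - credentials of caller.
 *
 * RETURN: 0 on success, error code on failure.
 */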
2145 int
2146 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2147 {
2148 znode_t *zp = ITOZ(ip);
2149 zfs_sb_t *zsb = ITOZSB(ip);
2150
2151 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2152
2153 if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
2154 ZFS_ENTER(zsb);
2155 ZFS_VERIFY_ZP(zp);
2156 zil_commit(zsb->z_log, zp->z_id);
2157 ZFS_EXIT(zsb);
2158 }
2159 return (0);
2160 }
2161 EXPORT_SYMBOL(zfs_fsync);
2162
2163
2164 /*
2165 * Get the requested file attributes and place them in the provided
2166 * vattr structure.
2167 *
2168 * IN: ip - inode of file.
2169 * vap - va_mask identifies requested attributes.
2170 * If ATTR_XVATTR set, then optional attrs are requested
2171 * flags - ATTR_NOACLCHECK (CIFS server context)
2172 * cr - credentials of caller.
2173 *
2174 * OUT: vap - attribute values.
2175 *
2176 * RETURN: 0 (always succeeds)
2177 */
2178 /* ARGSUSED */
2179 int
2180 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2181 {
2182 znode_t *zp = ITOZ(ip);
2183 zfs_sb_t *zsb = ITOZSB(ip);
2184 int error = 0;
2185 uint64_t links;
2186 uint64_t mtime[2], ctime[2];
2187 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2188 xoptattr_t *xoap = NULL;
2189 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2190 sa_bulk_attr_t bulk[2];
2191 int count = 0;
2192
2193 ZFS_ENTER(zsb);
2194 ZFS_VERIFY_ZP(zp);
2195
2196 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2197
2198 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
2199 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
2200
2201 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2202 ZFS_EXIT(zsb);
2203 return (error);
2204 }
2205
2206 /*
2207 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2208 * Also, if we are the owner don't bother, since owner should
2209 * always be allowed to read basic attributes of file.
2210 */
2211 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2212 (vap->va_uid != crgetuid(cr))) {
2213 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2214 skipaclchk, cr))) {
2215 ZFS_EXIT(zsb);
2216 return (error);
2217 }
2218 }
2219
2220 /*
2221 * Return all attributes. It's cheaper to provide the answer
2222 * than to determine whether we were asked the question.
2223 */
2224
2225 mutex_enter(&zp->z_lock);
2226 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2227 vap->va_mode = zp->z_mode;
2228 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2229 vap->va_nodeid = zp->z_id;
2230 if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
2231 links = zp->z_links + 1;
2232 else
2233 links = zp->z_links;
2234 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2235 vap->va_size = i_size_read(ip);
2236 vap->va_rdev = ip->i_rdev;
2237 vap->va_seq = ip->i_generation;
2238
2239 /*
2240 * Add in any requested optional attributes and the create time.
2241 * Also set the corresponding bits in the returned attribute bitmap.
2242 */
2243 if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
2244 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2245 xoap->xoa_archive =
2246 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2247 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2248 }
2249
2250 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2251 xoap->xoa_readonly =
2252 ((zp->z_pflags & ZFS_READONLY) != 0);
2253 XVA_SET_RTN(xvap, XAT_READONLY);
2254 }
2255
2256 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2257 xoap->xoa_system =
2258 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2259 XVA_SET_RTN(xvap, XAT_SYSTEM);
2260 }
2261
2262 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2263 xoap->xoa_hidden =
2264 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2265 XVA_SET_RTN(xvap, XAT_HIDDEN);
2266 }
2267
2268 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2269 xoap->xoa_nounlink =
2270 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2271 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2272 }
2273
2274 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2275 xoap->xoa_immutable =
2276 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2277 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2278 }
2279
2280 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2281 xoap->xoa_appendonly =
2282 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2283 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2284 }
2285
2286 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2287 xoap->xoa_nodump =
2288 ((zp->z_pflags & ZFS_NODUMP) != 0);
2289 XVA_SET_RTN(xvap, XAT_NODUMP);
2290 }
2291
2292 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2293 xoap->xoa_opaque =
2294 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2295 XVA_SET_RTN(xvap, XAT_OPAQUE);
2296 }
2297
2298 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2299 xoap->xoa_av_quarantined =
2300 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2301 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2302 }
2303
2304 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2305 xoap->xoa_av_modified =
2306 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2307 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2308 }
2309
2310 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2311 S_ISREG(ip->i_mode)) {
2312 zfs_sa_get_scanstamp(zp, xvap);
2313 }
2314
2315 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2316 uint64_t times[2];
2317
2318 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
2319 times, sizeof (times));
2320 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2321 XVA_SET_RTN(xvap, XAT_CREATETIME);
2322 }
2323
2324 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2325 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2326 XVA_SET_RTN(xvap, XAT_REPARSE);
2327 }
2328 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2329 xoap->xoa_generation = zp->z_gen;
2330 XVA_SET_RTN(xvap, XAT_GEN);
2331 }
2332
2333 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2334 xoap->xoa_offline =
2335 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2336 XVA_SET_RTN(xvap, XAT_OFFLINE);
2337 }
2338
2339 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2340 xoap->xoa_sparse =
2341 ((zp->z_pflags & ZFS_SPARSE) != 0);
2342 XVA_SET_RTN(xvap, XAT_SPARSE);
2343 }
2344 }
2345
2346 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2347 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2348 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2349
2350 mutex_exit(&zp->z_lock);
2351
2352 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2353
2354 if (zp->z_blksz == 0) {
2355 /*
2356 * Block size hasn't been set; suggest maximal I/O transfers.
2357 */
2358 vap->va_blksize = zsb->z_max_blksz;
2359 }
2360
2361 ZFS_EXIT(zsb);
2362 return (0);
2363 }
2364 EXPORT_SYMBOL(zfs_getattr);
2365
2366 /*
2367 * Get the basic file attributes and place them in the provided kstat
2368 * structure. The inode is assumed to be the authoritative source
2369 * for most of the attributes. However, the znode currently has the
2370 * authoritative atime, blksize, and block count.
2371 *
2372 * IN: ip - inode of file.
2373 *
2374 * OUT: sp - kstat values.
2375 *
2376 * RETURN: 0 (always succeeds)
2377 */
2378 /* ARGSUSED */
2379 int
2380 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2381 {
2382 znode_t *zp = ITOZ(ip);
2383 zfs_sb_t *zsb = ITOZSB(ip);
2384 uint32_t blksize;
2385 u_longlong_t nblocks;
2386
2387 ZFS_ENTER(zsb);
2388 ZFS_VERIFY_ZP(zp);
2389
2390 mutex_enter(&zp->z_lock);
2391
2392 generic_fillattr(ip, sp);
2393 ZFS_TIME_DECODE(&sp->atime, zp->z_atime);
2394
2395 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2396 sp->blksize = blksize;
2397 sp->blocks = nblocks;
2398
2399 if (unlikely(zp->z_blksz == 0)) {
2400 /*
2401 * Block size hasn't been set; suggest maximal I/O transfers.
2402 */
2403 sp->blksize = zsb->z_max_blksz;
2404 }
2405
2406 mutex_exit(&zp->z_lock);
2407
2408 ZFS_EXIT(zsb);
2409
2410 return (0);
2411 }
2412 EXPORT_SYMBOL(zfs_getattr_fast);
2413
2414 /*
2415 * Set the file attributes to the values contained in the
2416 * vattr structure.
2417 *
2418 * IN: ip - inode of file to be modified.
2419 * vap - new attribute values.
2420 * If ATTR_XVATTR set, then optional attrs are being set
2421 * flags - ATTR_UTIME set if non-default time values provided.
2422 * - ATTR_NOACLCHECK (CIFS context only).
2423 * cr - credentials of caller.
2424 *
2425 * RETURN: 0 if success
2426 * error code if failure
2427 *
2428 * Timestamps:
2429 * ip - ctime updated, mtime updated if size changed.
2430 */
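/*
 * The overall flow below: validate the request against the file
 * system version and the file's flags, trim ATTR_UID/ATTR_GID/
 * ATTR_MODE from the mask when an ACL check has already granted
 * them (so secpolicy_vnode_setattr() does not revoke them), then
 * apply the surviving attributes as one SA bulk update inside a
 * single transaction, restarting from 'top' if the transaction
 * must wait.
 */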
2431 /* ARGSUSED */
2432 int
2433 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2434 {
2435 znode_t *zp = ITOZ(ip);
2436 zfs_sb_t *zsb = ITOZSB(ip);
2437 zilog_t *zilog;
2438 dmu_tx_t *tx;
2439 vattr_t oldva;
2440 xvattr_t *tmpxvattr;
2441 uint_t mask = vap->va_mask;
2442 uint_t saved_mask = 0;
2443 int trim_mask = 0;
2444 uint64_t new_mode;
2445 uint64_t new_uid, new_gid;
2446 uint64_t xattr_obj;
2447 uint64_t mtime[2], ctime[2];
2448 znode_t *attrzp;
2449 int need_policy = FALSE;
2450 int err, err2;
2451 zfs_fuid_info_t *fuidp = NULL;
2452 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2453 xoptattr_t *xoap;
2454 zfs_acl_t *aclp;
2455 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2456 boolean_t fuid_dirtied = B_FALSE;
2457 sa_bulk_attr_t *bulk, *xattr_bulk;
2458 int count = 0, xattr_count = 0;
2459
2460 if (mask == 0)
2461 return (0);
2462
2463 ZFS_ENTER(zsb);
2464 ZFS_VERIFY_ZP(zp);
2465
2466 zilog = zsb->z_log;
2467
2468 /*
2469 * Make sure that if an ephemeral uid/gid or an xvattr is specified,
2470 * the file system is at the proper version level.
2471 */
2472
2473 if (zsb->z_use_fuids == B_FALSE &&
2474 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2475 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2476 (mask & ATTR_XVATTR))) {
2477 ZFS_EXIT(zsb);
2478 return (SET_ERROR(EINVAL));
2479 }
2480
2481 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2482 ZFS_EXIT(zsb);
2483 return (SET_ERROR(EISDIR));
2484 }
2485
2486 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2487 ZFS_EXIT(zsb);
2488 return (SET_ERROR(EINVAL));
2489 }
2490
2491 /*
2492 * If this is an xvattr_t, then get a pointer to the structure of
2493 * optional attributes. If this is NULL, then we have a vattr_t.
2494 */
2495 xoap = xva_getxoptattr(xvap);
2496
2497 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2498 xva_init(tmpxvattr);
2499
2500 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2501 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
2502
2503 /*
2504 * On immutable files, only the immutable bit and atime may be altered.
2505 */
2506 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2507 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2508 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2509 err = EPERM;
2510 goto out3;
2511 }
2512
2513 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2514 err = EPERM;
2515 goto out3;
2516 }
2517
2518 /*
2519 * Verify the timestamps don't overflow 32 bits.
2520 * ZFS can handle large timestamps, but 32-bit syscalls can't
2521 * handle times beyond 2038. This check should be removed
2522 * once large timestamps are fully supported.
2523 */
2524 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2525 if (((mask & ATTR_ATIME) &&
2526 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2527 ((mask & ATTR_MTIME) &&
2528 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2529 err = EOVERFLOW;
2530 goto out3;
2531 }
2532 }
2533
2534 top:
2535 attrzp = NULL;
2536 aclp = NULL;
2537
2538 /* Can this be moved to before the top label? */
2539 if (zfs_is_readonly(zsb)) {
2540 err = EROFS;
2541 goto out3;
2542 }
2543
2544 /*
2545 * First validate permissions
2546 */
2547
2548 if (mask & ATTR_SIZE) {
2549 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2550 if (err)
2551 goto out3;
2552
2553 /*
2554 * XXX - Note, we are not providing any open
2555 * mode flags here (like FNDELAY), so we may
2556 * block if there are locks present... this
2557 * should be addressed in openat().
2558 */
2559 /* XXX - would it be OK to generate a log record here? */
2560 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2561 if (err)
2562 goto out3;
2563 }
2564
2565 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2566 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2567 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2568 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2569 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2570 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2571 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2572 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2573 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2574 skipaclchk, cr);
2575 }
2576
2577 if (mask & (ATTR_UID|ATTR_GID)) {
2578 int idmask = (mask & (ATTR_UID|ATTR_GID));
2579 int take_owner;
2580 int take_group;
2581
2582 /*
2583 * NOTE: even if a new mode is being set,
2584 * we may clear S_ISUID/S_ISGID bits.
2585 */
2586
2587 if (!(mask & ATTR_MODE))
2588 vap->va_mode = zp->z_mode;
2589
2590 /*
2591 * Take ownership or chgrp to group we are a member of
2592 */
2593
2594 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
2595 take_group = (mask & ATTR_GID) &&
2596 zfs_groupmember(zsb, vap->va_gid, cr);
2597
2598 /*
2599 * If both ATTR_UID and ATTR_GID are set then take_owner and
2600 * take_group must both be set in order to allow taking
2601 * ownership.
2602 *
2603 * Otherwise, send the check through secpolicy_vnode_setattr()
2604 *
2605 */
2606
2607 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2608 take_owner && take_group) ||
2609 ((idmask == ATTR_UID) && take_owner) ||
2610 ((idmask == ATTR_GID) && take_group)) {
2611 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2612 skipaclchk, cr) == 0) {
2613 /*
2614 * Remove setuid/setgid for non-privileged users
2615 */
2616 (void) secpolicy_setid_clear(vap, cr);
2617 trim_mask = (mask & (ATTR_UID|ATTR_GID));
2618 } else {
2619 need_policy = TRUE;
2620 }
2621 } else {
2622 need_policy = TRUE;
2623 }
2624 }
2625
2626 mutex_enter(&zp->z_lock);
2627 oldva.va_mode = zp->z_mode;
2628 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2629 if (mask & ATTR_XVATTR) {
2630 /*
2631 * Update xvattr mask to include only those attributes
2632 * that are actually changing.
2633 *
2634 * The bits will be restored prior to actually setting
2635 * the attributes, so the caller thinks they were set.
2636 */
2637 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2638 if (xoap->xoa_appendonly !=
2639 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2640 need_policy = TRUE;
2641 } else {
2642 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2643 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2644 }
2645 }
2646
2647 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2648 if (xoap->xoa_nounlink !=
2649 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2650 need_policy = TRUE;
2651 } else {
2652 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2653 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2654 }
2655 }
2656
2657 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2658 if (xoap->xoa_immutable !=
2659 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2660 need_policy = TRUE;
2661 } else {
2662 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2663 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2664 }
2665 }
2666
2667 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2668 if (xoap->xoa_nodump !=
2669 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2670 need_policy = TRUE;
2671 } else {
2672 XVA_CLR_REQ(xvap, XAT_NODUMP);
2673 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2674 }
2675 }
2676
2677 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2678 if (xoap->xoa_av_modified !=
2679 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2680 need_policy = TRUE;
2681 } else {
2682 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2683 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2684 }
2685 }
2686
2687 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2688 if ((!S_ISREG(ip->i_mode) &&
2689 xoap->xoa_av_quarantined) ||
2690 xoap->xoa_av_quarantined !=
2691 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2692 need_policy = TRUE;
2693 } else {
2694 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2695 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2696 }
2697 }
2698
2699 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2700 mutex_exit(&zp->z_lock);
2701 err = EPERM;
2702 goto out3;
2703 }
2704
2705 if (need_policy == FALSE &&
2706 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2707 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2708 need_policy = TRUE;
2709 }
2710 }
2711
2712 mutex_exit(&zp->z_lock);
2713
2714 if (mask & ATTR_MODE) {
2715 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2716 err = secpolicy_setid_setsticky_clear(ip, vap,
2717 &oldva, cr);
2718 if (err)
2719 goto out3;
2720
2721 trim_mask |= ATTR_MODE;
2722 } else {
2723 need_policy = TRUE;
2724 }
2725 }
2726
2727 if (need_policy) {
2728 /*
2729 * If trim_mask is set then take-ownership has been
2730 * granted or write_acl is present and the user has the
2731 * ability to modify the mode. In that case remove
2732 * UID|GID and/or MODE from the mask so that
2733 * secpolicy_vnode_setattr() doesn't revoke it.
2734 */
2735
2736 if (trim_mask) {
2737 saved_mask = vap->va_mask;
2738 vap->va_mask &= ~trim_mask;
2739 }
2740 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2741 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2742 if (err)
2743 goto out3;
2744
2745 if (trim_mask)
2746 vap->va_mask |= saved_mask;
2747 }
2748
2749 /*
2750 * secpolicy_vnode_setattr() or take-ownership may have
2751 * changed va_mask.
2752 */
2753 mask = vap->va_mask;
2754
2755 if ((mask & (ATTR_UID | ATTR_GID))) {
2756 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
2757 &xattr_obj, sizeof (xattr_obj));
2758
2759 if (err == 0 && xattr_obj) {
2760 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
2761 if (err)
2762 goto out2;
2763 }
2764 if (mask & ATTR_UID) {
2765 new_uid = zfs_fuid_create(zsb,
2766 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2767 if (new_uid != zp->z_uid &&
2768 zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
2769 if (attrzp)
2770 iput(ZTOI(attrzp));
2771 err = EDQUOT;
2772 goto out2;
2773 }
2774 }
2775
2776 if (mask & ATTR_GID) {
2777 new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
2778 cr, ZFS_GROUP, &fuidp);
2779 if (new_gid != zp->z_gid &&
2780 zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
2781 if (attrzp)
2782 iput(ZTOI(attrzp));
2783 err = EDQUOT;
2784 goto out2;
2785 }
2786 }
2787 }
2788 tx = dmu_tx_create(zsb->z_os);
2789
2790 if (mask & ATTR_MODE) {
2791 uint64_t pmode = zp->z_mode;
2792 uint64_t acl_obj;
2793 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2794
2795 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
2796
2797 mutex_enter(&zp->z_lock);
2798 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2799 /*
2800 * Are we upgrading ACL from old V0 format
2801 * to V1 format?
2802 */
2803 if (zsb->z_version >= ZPL_VERSION_FUID &&
2804 zfs_znode_acl_version(zp) ==
2805 ZFS_ACL_VERSION_INITIAL) {
2806 dmu_tx_hold_free(tx, acl_obj, 0,
2807 DMU_OBJECT_END);
2808 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2809 0, aclp->z_acl_bytes);
2810 } else {
2811 dmu_tx_hold_write(tx, acl_obj, 0,
2812 aclp->z_acl_bytes);
2813 }
2814 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2815 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2816 0, aclp->z_acl_bytes);
2817 }
2818 mutex_exit(&zp->z_lock);
2819 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2820 } else {
2821 if ((mask & ATTR_XVATTR) &&
2822 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2823 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2824 else
2825 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2826 }
2827
2828 if (attrzp) {
2829 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2830 }
2831
2832 fuid_dirtied = zsb->z_fuid_dirty;
2833 if (fuid_dirtied)
2834 zfs_fuid_txhold(zsb, tx);
2835
2836 zfs_sa_upgrade_txholds(tx, zp);
2837
2838 err = dmu_tx_assign(tx, TXG_WAIT);
2839 if (err)
2840 goto out;
2841
2842 count = 0;
2843 /*
2844 * Set each attribute requested.
2845 * We group settings according to the locks they need to acquire.
2846 *
2847 * Note: you cannot set ctime directly, although it will be
2848 * updated as a side-effect of calling this function.
2849 */
2850
2851
2852 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2853 mutex_enter(&zp->z_acl_lock);
2854 mutex_enter(&zp->z_lock);
2855
2856 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
2857 &zp->z_pflags, sizeof (zp->z_pflags));
2858
2859 if (attrzp) {
2860 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2861 mutex_enter(&attrzp->z_acl_lock);
2862 mutex_enter(&attrzp->z_lock);
2863 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2864 SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
2865 sizeof (attrzp->z_pflags));
2866 }
2867
2868 if (mask & (ATTR_UID|ATTR_GID)) {
2869
2870 if (mask & ATTR_UID) {
2871 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
2872 &new_uid, sizeof (new_uid));
2873 zp->z_uid = new_uid;
2874 if (attrzp) {
2875 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2876 SA_ZPL_UID(zsb), NULL, &new_uid,
2877 sizeof (new_uid));
2878 attrzp->z_uid = new_uid;
2879 }
2880 }
2881
2882 if (mask & ATTR_GID) {
2883 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
2884 NULL, &new_gid, sizeof (new_gid));
2885 zp->z_gid = new_gid;
2886 if (attrzp) {
2887 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2888 SA_ZPL_GID(zsb), NULL, &new_gid,
2889 sizeof (new_gid));
2890 attrzp->z_gid = new_gid;
2891 }
2892 }
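/*
 * SA_ADD_BULK_ATTR() only records the address of new_mode; the
 * value is read later by sa_bulk_update(), so it is safe to
 * assign new_mode after registering it below.
 */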
2893 if (!(mask & ATTR_MODE)) {
2894 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
2895 NULL, &new_mode, sizeof (new_mode));
2896 new_mode = zp->z_mode;
2897 }
2898 err = zfs_acl_chown_setattr(zp);
2899 ASSERT(err == 0);
2900 if (attrzp) {
2901 err = zfs_acl_chown_setattr(attrzp);
2902 ASSERT(err == 0);
2903 }
2904 }
2905
2906 if (mask & ATTR_MODE) {
2907 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
2908 &new_mode, sizeof (new_mode));
2909 zp->z_mode = new_mode;
2910 ASSERT3P(aclp, !=, NULL);
2911 err = zfs_aclset_common(zp, aclp, cr, tx);
2912 ASSERT0(err);
2913 if (zp->z_acl_cached)
2914 zfs_acl_free(zp->z_acl_cached);
2915 zp->z_acl_cached = aclp;
2916 aclp = NULL;
2917 }
2918
2919
2920 if (mask & ATTR_ATIME) {
2921 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
2922 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
2923 &zp->z_atime, sizeof (zp->z_atime));
2924 }
2925
2926 if (mask & ATTR_MTIME) {
2927 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2928 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
2929 mtime, sizeof (mtime));
2930 }
2931
2932 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
2933 if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
2934 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
2935 NULL, mtime, sizeof (mtime));
2936 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2937 &ctime, sizeof (ctime));
2938 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
2939 B_TRUE);
2940 } else if (mask != 0) {
2941 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2942 &ctime, sizeof (ctime));
2943 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
2944 B_TRUE);
2945 if (attrzp) {
2946 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2947 SA_ZPL_CTIME(zsb), NULL,
2948 &ctime, sizeof (ctime));
2949 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
2950 mtime, ctime, B_TRUE);
2951 }
2952 }
2953 /*
2954 * Do this after setting timestamps to prevent timestamp
2955 * update from toggling bit
2956 */
2957
2958 if (xoap && (mask & ATTR_XVATTR)) {
2959
2960 /*
2961 * Restore trimmed-off masks
2962 * so that return masks can be set for the caller.
2963 */
2964
2965 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
2966 XVA_SET_REQ(xvap, XAT_APPENDONLY);
2967 }
2968 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
2969 XVA_SET_REQ(xvap, XAT_NOUNLINK);
2970 }
2971 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
2972 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2973 }
2974 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
2975 XVA_SET_REQ(xvap, XAT_NODUMP);
2976 }
2977 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
2978 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2979 }
2980 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
2981 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2982 }
2983
2984 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2985 ASSERT(S_ISREG(ip->i_mode));
2986
2987 zfs_xvattr_set(zp, xvap, tx);
2988 }
2989
2990 if (fuid_dirtied)
2991 zfs_fuid_sync(zsb, tx);
2992
2993 if (mask != 0)
2994 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2995
2996 mutex_exit(&zp->z_lock);
2997 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2998 mutex_exit(&zp->z_acl_lock);
2999
3000 if (attrzp) {
3001 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3002 mutex_exit(&attrzp->z_acl_lock);
3003 mutex_exit(&attrzp->z_lock);
3004 }
3005 out:
3006 if (err == 0 && attrzp) {
3007 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3008 xattr_count, tx);
3009 ASSERT(err2 == 0);
3010 }
3011
3012 if (attrzp)
3013 iput(ZTOI(attrzp));
3014 if (aclp)
3015 zfs_acl_free(aclp);
3016
3017 if (fuidp) {
3018 zfs_fuid_info_free(fuidp);
3019 fuidp = NULL;
3020 }
3021
3022 if (err) {
3023 dmu_tx_abort(tx);
3024 if (err == ERESTART)
3025 goto top;
3026 } else {
3027 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3028 dmu_tx_commit(tx);
3029 zfs_inode_update(zp);
3030 }
3031
3032 out2:
3033 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3034 zil_commit(zilog, 0);
3035
3036 out3:
3037 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
3038 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
3039 kmem_free(tmpxvattr, sizeof (xvattr_t));
3040 ZFS_EXIT(zsb);
3041 return (err);
3042 }
3043 EXPORT_SYMBOL(zfs_setattr);
3044
3045 typedef struct zfs_zlock {
3046 krwlock_t *zl_rwlock; /* lock we acquired */
3047 znode_t *zl_znode; /* znode we held */
3048 struct zfs_zlock *zl_next; /* next in list */
3049 } zfs_zlock_t;
3050
3051 /*
3052 * Drop locks and release vnodes that were held by zfs_rename_lock().
3053 */
3054 static void
3055 zfs_rename_unlock(zfs_zlock_t **zlpp)
3056 {
3057 zfs_zlock_t *zl;
3058
3059 while ((zl = *zlpp) != NULL) {
3060 if (zl->zl_znode != NULL)
3061 iput(ZTOI(zl->zl_znode));
3062 rw_exit(zl->zl_rwlock);
3063 *zlpp = zl->zl_next;
3064 kmem_free(zl, sizeof (*zl));
3065 }
3066 }
3067
3068 /*
3069 * Search back through the directory tree, using the ".." entries.
3070 * Lock each directory in the chain to prevent concurrent renames.
3071 * Fail any attempt to move a directory into one of its own descendants.
3072 * XXX - z_parent_lock can overlap with map or grow locks
3073 */
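/*
 * For example, an attempt to rename /a/b to /a/b/c/d starts at the
 * target directory c, follows its ".." entry up to b, matches szp's
 * object id there, and fails with EINVAL before a cycle can be
 * created in the namespace.
 */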
3074 static int
3075 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3076 {
3077 zfs_zlock_t *zl;
3078 znode_t *zp = tdzp;
3079 uint64_t rootid = ZTOZSB(zp)->z_root;
3080 uint64_t oidp = zp->z_id;
3081 krwlock_t *rwlp = &szp->z_parent_lock;
3082 krw_t rw = RW_WRITER;
3083
3084 /*
3085 * First pass write-locks szp and compares to zp->z_id.
3086 * Later passes read-lock zp and compare to zp->z_parent.
3087 */
3088 do {
3089 if (!rw_tryenter(rwlp, rw)) {
3090 /*
3091 * Another thread is renaming in this path.
3092 * Note that if we are a WRITER, we don't have any
3093 * parent_locks held yet.
3094 */
3095 if (rw == RW_READER && zp->z_id > szp->z_id) {
3096 /*
3097 * Drop our locks and restart
3098 */
3099 zfs_rename_unlock(&zl);
3100 *zlpp = NULL;
3101 zp = tdzp;
3102 oidp = zp->z_id;
3103 rwlp = &szp->z_parent_lock;
3104 rw = RW_WRITER;
3105 continue;
3106 } else {
3107 /*
3108 * Wait for other thread to drop its locks
3109 */
3110 rw_enter(rwlp, rw);
3111 }
3112 }
3113
3114 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3115 zl->zl_rwlock = rwlp;
3116 zl->zl_znode = NULL;
3117 zl->zl_next = *zlpp;
3118 *zlpp = zl;
3119
3120 if (oidp == szp->z_id) /* We're a descendant of szp */
3121 return (SET_ERROR(EINVAL));
3122
3123 if (oidp == rootid) /* We've hit the top */
3124 return (0);
3125
3126 if (rw == RW_READER) { /* i.e. not the first pass */
3127 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3128 if (error)
3129 return (error);
3130 zl->zl_znode = zp;
3131 }
3132 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3133 &oidp, sizeof (oidp));
3134 rwlp = &zp->z_parent_lock;
3135 rw = RW_READER;
3136
3137 } while (zp->z_id != sdzp->z_id);
3138
3139 return (0);
3140 }
3141
3142 /*
3143 * Move an entry from the provided source directory to the target
3144 * directory. Change the entry name as indicated.
3145 *
3146 * IN: sdip - Source directory containing the "old entry".
3147 * snm - Old entry name.
3148 * tdip - Target directory to contain the "new entry".
3149 * tnm - New entry name.
3150 * cr - credentials of caller.
3151 * flags - case flags
3152 *
3153 * RETURN: 0 on success, error code on failure.
3154 *
3155 * Timestamps:
3156 * sdip,tdip - ctime|mtime updated
3157 */
3158 /*ARGSUSED*/
3159 int
3160 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3161 cred_t *cr, int flags)
3162 {
3163 znode_t *tdzp, *szp, *tzp;
3164 znode_t *sdzp = ITOZ(sdip);
3165 zfs_sb_t *zsb = ITOZSB(sdip);
3166 zilog_t *zilog;
3167 zfs_dirlock_t *sdl, *tdl;
3168 dmu_tx_t *tx;
3169 zfs_zlock_t *zl;
3170 int cmp, serr, terr;
3171 int error = 0;
3172 int zflg = 0;
3173 boolean_t waited = B_FALSE;
3174
3175 ZFS_ENTER(zsb);
3176 ZFS_VERIFY_ZP(sdzp);
3177 zilog = zsb->z_log;
3178
3179 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3180 ZFS_EXIT(zsb);
3181 return (SET_ERROR(EXDEV));
3182 }
3183
3184 tdzp = ITOZ(tdip);
3185 ZFS_VERIFY_ZP(tdzp);
3186 if (zsb->z_utf8 && u8_validate(tnm,
3187 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3188 ZFS_EXIT(zsb);
3189 return (SET_ERROR(EILSEQ));
3190 }
3191
3192 if (flags & FIGNORECASE)
3193 zflg |= ZCILOOK;
3194
3195 top:
3196 szp = NULL;
3197 tzp = NULL;
3198 zl = NULL;
3199
3200 /*
3201 * This is to prevent the creation of links into attribute space
3202 * by renaming a linked file into/out of an attribute directory.
3203 * See the comment in zfs_link() for why this is considered bad.
3204 */
3205 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3206 ZFS_EXIT(zsb);
3207 return (SET_ERROR(EINVAL));
3208 }
3209
3210 /*
3211 * Lock source and target directory entries. To prevent deadlock,
3212 * a lock ordering must be defined. We lock the directory with
3213 * the smallest object id first, or if it's a tie, the one with
3214 * the lexically first name.
3215 */
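/*
 * Note that a tie on the object ids can only happen when the source
 * and target directories are the same znode, so comparing the two
 * names below is sufficient to order the locks.
 */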
3216 if (sdzp->z_id < tdzp->z_id) {
3217 cmp = -1;
3218 } else if (sdzp->z_id > tdzp->z_id) {
3219 cmp = 1;
3220 } else {
3221 /*
3222 * First compare the two name arguments without
3223 * considering any case folding.
3224 */
3225 int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
3226
3227 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3228 ASSERT(error == 0 || !zsb->z_utf8);
3229 if (cmp == 0) {
3230 /*
3231 * POSIX: "If the old argument and the new argument
3232 * both refer to links to the same existing file,
3233 * the rename() function shall return successfully
3234 * and perform no other action."
3235 */
3236 ZFS_EXIT(zsb);
3237 return (0);
3238 }
3239 /*
3240 * If the file system is case-folding, then we may
3241 * have some more checking to do. A case-folding file
3242 * system is either supporting mixed case sensitivity
3243 * access or is completely case-insensitive. Note
3244 * that the file system is always case preserving.
3245 *
3246 * In mixed sensitivity mode case sensitive behavior
3247 * is the default. FIGNORECASE must be used to
3248 * explicitly request case insensitive behavior.
3249 *
3250 * If the source and target names provided differ only
3251 * by case (e.g., a request to rename 'tim' to 'Tim'),
3252 * we will treat this as a special case in the
3253 * case-insensitive mode: as long as the source name
3254 * is an exact match, we will allow this to proceed as
3255 * a name-change request.
3256 */
3257 if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
3258 (zsb->z_case == ZFS_CASE_MIXED &&
3259 flags & FIGNORECASE)) &&
3260 u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
3261 &error) == 0) {
3262 /*
3263 * case preserving rename request, require exact
3264 * name matches
3265 */
3266 zflg |= ZCIEXACT;
3267 zflg &= ~ZCILOOK;
3268 }
3269 }
3270
3271 /*
3272 * If the source and destination directories are the same, we should
3273 * grab the z_name_lock of that directory only once.
3274 */
3275 if (sdzp == tdzp) {
3276 zflg |= ZHAVELOCK;
3277 rw_enter(&sdzp->z_name_lock, RW_READER);
3278 }
3279
3280 if (cmp < 0) {
3281 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3282 ZEXISTS | zflg, NULL, NULL);
3283 terr = zfs_dirent_lock(&tdl,
3284 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3285 } else {
3286 terr = zfs_dirent_lock(&tdl,
3287 tdzp, tnm, &tzp, zflg, NULL, NULL);
3288 serr = zfs_dirent_lock(&sdl,
3289 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3290 NULL, NULL);
3291 }
3292
3293 if (serr) {
3294 /*
3295 * Source entry invalid or not there.
3296 */
3297 if (!terr) {
3298 zfs_dirent_unlock(tdl);
3299 if (tzp)
3300 iput(ZTOI(tzp));
3301 }
3302
3303 if (sdzp == tdzp)
3304 rw_exit(&sdzp->z_name_lock);
3305
3306 if (strcmp(snm, "..") == 0)
3307 serr = EINVAL;
3308 ZFS_EXIT(zsb);
3309 return (serr);
3310 }
3311 if (terr) {
3312 zfs_dirent_unlock(sdl);
3313 iput(ZTOI(szp));
3314
3315 if (sdzp == tdzp)
3316 rw_exit(&sdzp->z_name_lock);
3317
3318 if (strcmp(tnm, "..") == 0)
3319 terr = EINVAL;
3320 ZFS_EXIT(zsb);
3321 return (terr);
3322 }
3323
3324 /*
3325 * Must have write access at the source to remove the old entry
3326 * and write access at the target to create the new entry.
3327 * Note that if target and source are the same, this can be
3328 * done in a single check.
3329 */
3330
3331 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3332 goto out;
3333
3334 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3335 /*
3336 * Check to make sure rename is valid.
3337 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3338 */
3339 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3340 goto out;
3341 }
3342
3343 /*
3344 * Does target exist?
3345 */
3346 if (tzp) {
3347 /*
3348 * Source and target must be the same type.
3349 */
3350 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3351 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3352 error = SET_ERROR(ENOTDIR);
3353 goto out;
3354 }
3355 } else {
3356 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3357 error = SET_ERROR(EISDIR);
3358 goto out;
3359 }
3360 }
3361 /*
3362 * POSIX dictates that when the source and target
3363 * entries refer to the same file object, rename
3364 * must do nothing and exit without error.
3365 */
3366 if (szp->z_id == tzp->z_id) {
3367 error = 0;
3368 goto out;
3369 }
3370 }
3371
3372 tx = dmu_tx_create(zsb->z_os);
3373 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3374 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3375 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3376 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3377 if (sdzp != tdzp) {
3378 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3379 zfs_sa_upgrade_txholds(tx, tdzp);
3380 }
3381 if (tzp) {
3382 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3383 zfs_sa_upgrade_txholds(tx, tzp);
3384 }
3385
3386 zfs_sa_upgrade_txholds(tx, szp);
3387 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
3388 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3389 if (error) {
3390 if (zl != NULL)
3391 zfs_rename_unlock(&zl);
3392 zfs_dirent_unlock(sdl);
3393 zfs_dirent_unlock(tdl);
3394
3395 if (sdzp == tdzp)
3396 rw_exit(&sdzp->z_name_lock);
3397
3398 iput(ZTOI(szp));
3399 if (tzp)
3400 iput(ZTOI(tzp));
3401 if (error == ERESTART) {
3402 waited = B_TRUE;
3403 dmu_tx_wait(tx);
3404 dmu_tx_abort(tx);
3405 goto top;
3406 }
3407 dmu_tx_abort(tx);
3408 ZFS_EXIT(zsb);
3409 return (error);
3410 }
3411
3412 if (tzp) /* Attempt to remove the existing target */
3413 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3414
3415 if (error == 0) {
3416 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3417 if (error == 0) {
3418 szp->z_pflags |= ZFS_AV_MODIFIED;
3419
3420 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
3421 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3422 ASSERT0(error);
3423
3424 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3425 if (error == 0) {
3426 zfs_log_rename(zilog, tx, TX_RENAME |
3427 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3428 sdl->dl_name, tdzp, tdl->dl_name, szp);
3429 } else {
3430 /*
3431 * At this point, we have successfully created
3432 * the target name, but have failed to remove
3433 * the source name. Since the create was done
3434 * with the ZRENAMING flag, there are
3435 * complications; for one, the link count is
3436 * wrong. The easiest way to deal with this
3437 * is to remove the newly created target, and
3438 * return the original error. This must
3439 * succeed; fortunately, it is very unlikely to
3440 * fail, since we just created it.
3441 */
3442 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3443 ZRENAMING, NULL), ==, 0);
3444 }
3445 }
3446 }
3447
3448 dmu_tx_commit(tx);
3449 out:
3450 if (zl != NULL)
3451 zfs_rename_unlock(&zl);
3452
3453 zfs_dirent_unlock(sdl);
3454 zfs_dirent_unlock(tdl);
3455
3456 zfs_inode_update(sdzp);
3457 if (sdzp == tdzp)
3458 rw_exit(&sdzp->z_name_lock);
3459
3460 if (sdzp != tdzp)
3461 zfs_inode_update(tdzp);
3462
3463 zfs_inode_update(szp);
3464 iput(ZTOI(szp));
3465 if (tzp) {
3466 zfs_inode_update(tzp);
3467 iput(ZTOI(tzp));
3468 }
3469
3470 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3471 zil_commit(zilog, 0);
3472
3473 ZFS_EXIT(zsb);
3474 return (error);
3475 }
3476 EXPORT_SYMBOL(zfs_rename);
3477
3478 /*
3479 * Insert the indicated symbolic reference entry into the directory.
3480 *
3481 * IN: dip - Directory to contain new symbolic link.
3482 * name - Name of new symlink entry.
3483 * vap - Attributes of new entry.
3484 * link - Target path of new symlink.
3485 * cr - credentials of caller.
3486 * flags - case flags.
3487 * OUT: ipp - inode of new symlink entry.
3488 *
3489 * RETURN: 0 on success, error code on failure.
3490 *
3491 * Timestamps:
3492 * dip - ctime|mtime updated
3493 */
3494 /*ARGSUSED*/
3495 int
3496 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3497 struct inode **ipp, cred_t *cr, int flags)
3498 {
3499 znode_t *zp, *dzp = ITOZ(dip);
3500 zfs_dirlock_t *dl;
3501 dmu_tx_t *tx;
3502 zfs_sb_t *zsb = ITOZSB(dip);
3503 zilog_t *zilog;
3504 uint64_t len = strlen(link);
3505 int error;
3506 int zflg = ZNEW;
3507 zfs_acl_ids_t acl_ids;
3508 boolean_t fuid_dirtied;
3509 uint64_t txtype = TX_SYMLINK;
3510 boolean_t waited = B_FALSE;
3511
3512 ASSERT(S_ISLNK(vap->va_mode));
3513
3514 ZFS_ENTER(zsb);
3515 ZFS_VERIFY_ZP(dzp);
3516 zilog = zsb->z_log;
3517
3518 if (zsb->z_utf8 && u8_validate(name, strlen(name),
3519 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3520 ZFS_EXIT(zsb);
3521 return (SET_ERROR(EILSEQ));
3522 }
3523 if (flags & FIGNORECASE)
3524 zflg |= ZCILOOK;
3525
3526 if (len > MAXPATHLEN) {
3527 ZFS_EXIT(zsb);
3528 return (SET_ERROR(ENAMETOOLONG));
3529 }
3530
3531 if ((error = zfs_acl_ids_create(dzp, 0,
3532 vap, cr, NULL, &acl_ids)) != 0) {
3533 ZFS_EXIT(zsb);
3534 return (error);
3535 }
3536 top:
3537 *ipp = NULL;
3538
3539 /*
3540 * Attempt to lock directory; fail if entry already exists.
3541 */
3542 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3543 if (error) {
3544 zfs_acl_ids_free(&acl_ids);
3545 ZFS_EXIT(zsb);
3546 return (error);
3547 }
3548
3549 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3550 zfs_acl_ids_free(&acl_ids);
3551 zfs_dirent_unlock(dl);
3552 ZFS_EXIT(zsb);
3553 return (error);
3554 }
3555
3556 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
3557 zfs_acl_ids_free(&acl_ids);
3558 zfs_dirent_unlock(dl);
3559 ZFS_EXIT(zsb);
3560 return (SET_ERROR(EDQUOT));
3561 }
3562 tx = dmu_tx_create(zsb->z_os);
3563 fuid_dirtied = zsb->z_fuid_dirty;
3564 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3565 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3566 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3567 ZFS_SA_BASE_ATTR_SIZE + len);
3568 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3569 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3570 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3571 acl_ids.z_aclp->z_acl_bytes);
3572 }
3573 if (fuid_dirtied)
3574 zfs_fuid_txhold(zsb, tx);
3575 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3576 if (error) {
3577 zfs_dirent_unlock(dl);
3578 if (error == ERESTART) {
3579 waited = B_TRUE;
3580 dmu_tx_wait(tx);
3581 dmu_tx_abort(tx);
3582 goto top;
3583 }
3584 zfs_acl_ids_free(&acl_ids);
3585 dmu_tx_abort(tx);
3586 ZFS_EXIT(zsb);
3587 return (error);
3588 }
3589
3590 /*
3591 * Create a new object for the symlink.
3592 * For version 4 ZPL datasets the symlink will be an SA attribute.
3593 */
3594 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3595
3596 if (fuid_dirtied)
3597 zfs_fuid_sync(zsb, tx);
3598
3599 mutex_enter(&zp->z_lock);
3600 if (zp->z_is_sa)
3601 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
3602 link, len, tx);
3603 else
3604 zfs_sa_symlink(zp, link, len, tx);
3605 mutex_exit(&zp->z_lock);
3606
3607 zp->z_size = len;
3608 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
3609 &zp->z_size, sizeof (zp->z_size), tx);
3610 /*
3611 * Insert the new object into the directory.
3612 */
3613 (void) zfs_link_create(dl, zp, tx, ZNEW);
3614
3615 if (flags & FIGNORECASE)
3616 txtype |= TX_CI;
3617 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3618
3619 zfs_inode_update(dzp);
3620 zfs_inode_update(zp);
3621
3622 zfs_acl_ids_free(&acl_ids);
3623
3624 dmu_tx_commit(tx);
3625
3626 zfs_dirent_unlock(dl);
3627
3628 *ipp = ZTOI(zp);
3629
3630 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3631 zil_commit(zilog, 0);
3632
3633 ZFS_EXIT(zsb);
3634 return (error);
3635 }
3636 EXPORT_SYMBOL(zfs_symlink);
3637
3638 /*
3639 * Return, in the buffer contained in the provided uio structure,
3640 * the symbolic path referred to by ip.
3641 *
3642 * IN: ip - inode of symbolic link
3643 * uio - structure to contain the link path.
3644 * cr - credentials of caller.
3645 *
3646 * RETURN: 0 if success
3647 * error code if failure
3648 *
3649 * Timestamps:
3650 * ip - atime updated
3651 */
3652 /* ARGSUSED */
3653 int
3654 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3655 {
3656 znode_t *zp = ITOZ(ip);
3657 zfs_sb_t *zsb = ITOZSB(ip);
3658 int error;
3659
3660 ZFS_ENTER(zsb);
3661 ZFS_VERIFY_ZP(zp);
3662
3663 mutex_enter(&zp->z_lock);
3664 if (zp->z_is_sa)
3665 error = sa_lookup_uio(zp->z_sa_hdl,
3666 SA_ZPL_SYMLINK(zsb), uio);
3667 else
3668 error = zfs_sa_readlink(zp, uio);
3669 mutex_exit(&zp->z_lock);
3670
3671 ZFS_ACCESSTIME_STAMP(zsb, zp);
3672 ZFS_EXIT(zsb);
3673 return (error);
3674 }
3675 EXPORT_SYMBOL(zfs_readlink);
3676
3677 /*
3678 * Insert a new entry into directory tdip referencing sip.
3679 *
3680 * IN: tdip - Directory to contain new entry.
3681 * sip - inode of new entry.
3682 * name - name of new entry.
3683 * cr - credentials of caller.
3684 *
3685 * RETURN: 0 if success
3686 * error code if failure
3687 *
3688 * Timestamps:
3689 * tdip - ctime|mtime updated
3690 * sip - ctime updated
3691 */
3692 /* ARGSUSED */
3693 int
3694 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
3695 {
3696 znode_t *dzp = ITOZ(tdip);
3697 znode_t *tzp, *szp;
3698 zfs_sb_t *zsb = ITOZSB(tdip);
3699 zilog_t *zilog;
3700 zfs_dirlock_t *dl;
3701 dmu_tx_t *tx;
3702 int error;
3703 int zf = ZNEW;
3704 uint64_t parent;
3705 uid_t owner;
3706 boolean_t waited = B_FALSE;
3707
3708 ASSERT(S_ISDIR(tdip->i_mode));
3709
3710 ZFS_ENTER(zsb);
3711 ZFS_VERIFY_ZP(dzp);
3712 zilog = zsb->z_log;
3713
3714 /*
3715 * POSIX dictates that we return EPERM here.
3716 * Better choices include ENOTSUP or EISDIR.
3717 */
3718 if (S_ISDIR(sip->i_mode)) {
3719 ZFS_EXIT(zsb);
3720 return (SET_ERROR(EPERM));
3721 }
3722
3723 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
3724 ZFS_EXIT(zsb);
3725 return (SET_ERROR(EXDEV));
3726 }
3727
3728 szp = ITOZ(sip);
3729 ZFS_VERIFY_ZP(szp);
3730
3731 /* Prevent links to .zfs/shares files */
3732
3733 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
3734 &parent, sizeof (uint64_t))) != 0) {
3735 ZFS_EXIT(zsb);
3736 return (error);
3737 }
3738 if (parent == zsb->z_shares_dir) {
3739 ZFS_EXIT(zsb);
3740 return (SET_ERROR(EPERM));
3741 }
3742
3743 if (zsb->z_utf8 && u8_validate(name,
3744 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3745 ZFS_EXIT(zsb);
3746 return (SET_ERROR(EILSEQ));
3747 }
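/*
 * The FIGNORECASE handling below is compiled out: this port does not
 * pass case flags into zfs_link(), so the guarded code references a
 * 'flags' variable that does not exist in this function.
 */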
3748 #ifdef HAVE_PN_UTILS
3749 if (flags & FIGNORECASE)
3750 zf |= ZCILOOK;
3751 #endif /* HAVE_PN_UTILS */
3752
3753 /*
3754 * We do not support links between attributes and non-attributes
3755 * because of the potential security risk of creating links
3756 * into "normal" file space in order to circumvent restrictions
3757 * imposed in attribute space.
3758 */
3759 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3760 ZFS_EXIT(zsb);
3761 return (SET_ERROR(EINVAL));
3762 }
3763
3764 owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
3765 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3766 ZFS_EXIT(zsb);
3767 return (SET_ERROR(EPERM));
3768 }
3769
3770 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3771 ZFS_EXIT(zsb);
3772 return (error);
3773 }
3774
3775 top:
3776 /*
3777 * Attempt to lock directory; fail if entry already exists.
3778 */
3779 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3780 if (error) {
3781 ZFS_EXIT(zsb);
3782 return (error);
3783 }
3784
3785 tx = dmu_tx_create(zsb->z_os);
3786 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3787 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3788 zfs_sa_upgrade_txholds(tx, szp);
3789 zfs_sa_upgrade_txholds(tx, dzp);
3790 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3791 if (error) {
3792 zfs_dirent_unlock(dl);
3793 if (error == ERESTART) {
3794 waited = B_TRUE;
3795 dmu_tx_wait(tx);
3796 dmu_tx_abort(tx);
3797 goto top;
3798 }
3799 dmu_tx_abort(tx);
3800 ZFS_EXIT(zsb);
3801 return (error);
3802 }
3803
3804 error = zfs_link_create(dl, szp, tx, 0);
3805
3806 if (error == 0) {
3807 uint64_t txtype = TX_LINK;
3808 #ifdef HAVE_PN_UTILS
3809 if (flags & FIGNORECASE)
3810 txtype |= TX_CI;
3811 #endif /* HAVE_PN_UTILS */
3812 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3813 }
3814
3815 dmu_tx_commit(tx);
3816
3817 zfs_dirent_unlock(dl);
3818
3819 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3820 zil_commit(zilog, 0);
3821
3822 zfs_inode_update(dzp);
3823 zfs_inode_update(szp);
3824 ZFS_EXIT(zsb);
3825 return (error);
3826 }
3827 EXPORT_SYMBOL(zfs_link);
3828
3829 static void
3830 zfs_putpage_commit_cb(void *arg)
3831 {
3832 struct page *pp = arg;
3833
3834 ClearPageError(pp);
3835 end_page_writeback(pp);
3836 }
3837
3838 /*
3839 * Push a page out to disk. Once the page is on stable storage, the
3840 * registered commit callback will be run as notification of completion.
3841 *
3842 * IN: ip - inode of file where the page is mapped.
3843 * pp - page to push (page is locked)
3844 * wbc - writeback control data
3845 *
3846 * RETURN: 0 if success
3847 * error code if failure
3848 *
3849 * Timestamps:
3850 * ip - ctime|mtime updated
3851 */
3852 /* ARGSUSED */
3853 int
3854 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
3855 {
3856 znode_t *zp = ITOZ(ip);
3857 zfs_sb_t *zsb = ITOZSB(ip);
3858 loff_t offset;
3859 loff_t pgoff;
3860 unsigned int pglen;
3861 rl_t *rl;
3862 dmu_tx_t *tx;
3863 caddr_t va;
3864 int err = 0;
3865 uint64_t mtime[2], ctime[2];
3866 sa_bulk_attr_t bulk[3];
3867 int cnt = 0;
3868
3869 ZFS_ENTER(zsb);
3870 ZFS_VERIFY_ZP(zp);
3871
3872 ASSERT(PageLocked(pp));
3873
3874 pgoff = page_offset(pp); /* Page byte-offset in file */
3875 offset = i_size_read(ip); /* File length in bytes */
3876 pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */
3877 P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
3878
3879 /* Page is beyond end of file */
3880 if (pgoff >= offset) {
3881 unlock_page(pp);
3882 ZFS_EXIT(zsb);
3883 return (0);
3884 }
3885
3886 /* Truncate page length to end of file */
3887 if (pgoff + pglen > offset)
3888 pglen = offset - pgoff;
3889
3890 #if 0
3891 /*
3892 * FIXME: Allow mmap writes past its quota. The correct fix
3893 * is to register a page_mkwrite() handler to count the page
3894 * against its quota when it is about to be dirtied.
3895 */
3896 if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
3897 zfs_owner_overquota(zsb, zp, B_TRUE)) {
3898 err = EDQUOT;
3899 }
3900 #endif
3901
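/*
 * Take a write range lock covering the page, mark the page as under
 * writeback, and drop the page lock. The commit callback registered
 * with the log record below clears the writeback bit once the data
 * reaches stable storage.
 */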
3902 rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
3903
3904 set_page_writeback(pp);
3905 unlock_page(pp);
3906
3907 tx = dmu_tx_create(zsb->z_os);
3908 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
3909 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3910 zfs_sa_upgrade_txholds(tx, zp);
3911 err = dmu_tx_assign(tx, TXG_NOWAIT);
3912 if (err != 0) {
3913 if (err == ERESTART)
3914 dmu_tx_wait(tx);
3915
3916 dmu_tx_abort(tx);
3917 __set_page_dirty_nobuffers(pp);
3918 ClearPageError(pp);
3919 end_page_writeback(pp);
3920 zfs_range_unlock(rl);
3921 ZFS_EXIT(zsb);
3922 return (err);
3923 }
3924
3925 va = kmap(pp);
3926 ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
3927 dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
3928 kunmap(pp);
3929
3930 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
3931 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
3932 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8);
3933
3934 /* Preserve the mtime and ctime provided by the inode */
3935 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
3936 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
3937 zp->z_atime_dirty = 0;
3938 zp->z_seq++;
3939
3940 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3941
3942 zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
3943 zfs_putpage_commit_cb, pp);
3944 dmu_tx_commit(tx);
3945
3946 zfs_range_unlock(rl);
3947
3948 if (wbc->sync_mode != WB_SYNC_NONE) {
3949 /*
3950 * Note that this is rarely called under writepages(), because
3951 * writepages() normally handles the entire commit for
3952 * performance reasons.
3953 */
3954 if (zsb->z_log != NULL)
3955 zil_commit(zsb->z_log, zp->z_id);
3956 }
3957
3958 ZFS_EXIT(zsb);
3959 return (err);
3960 }
3961
3962 /*
3963 * Update the system attributes when the inode has been dirtied. For the
3964 * moment we only update the mode, atime, mtime, and ctime.
3965 */
3966 int
3967 zfs_dirty_inode(struct inode *ip, int flags)
3968 {
3969 znode_t *zp = ITOZ(ip);
3970 zfs_sb_t *zsb = ITOZSB(ip);
3971 dmu_tx_t *tx;
3972 uint64_t mode, atime[2], mtime[2], ctime[2];
3973 sa_bulk_attr_t bulk[4];
3974 int error;
3975 int cnt = 0;
3976
3977 if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os))
3978 return (0);
3979
3980 ZFS_ENTER(zsb);
3981 ZFS_VERIFY_ZP(zp);
3982
3983 tx = dmu_tx_create(zsb->z_os);
3984
3985 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3986 zfs_sa_upgrade_txholds(tx, zp);
3987
3988 error = dmu_tx_assign(tx, TXG_WAIT);
3989 if (error) {
3990 dmu_tx_abort(tx);
3991 goto out;
3992 }
3993
3994 mutex_enter(&zp->z_lock);
3995 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zsb), NULL, &mode, 8);
3996 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16);
3997 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
3998 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
3999
4000 /* Preserve the mode, mtime and ctime provided by the inode */
4001 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4002 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4003 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4004 mode = ip->i_mode;
4005
4006 zp->z_mode = mode;
4007 zp->z_atime_dirty = 0;
4008
4009 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4010 mutex_exit(&zp->z_lock);
4011
4012 dmu_tx_commit(tx);
4013 out:
4014 ZFS_EXIT(zsb);
4015 return (error);
4016 }
4017 EXPORT_SYMBOL(zfs_dirty_inode);
4018
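/*
 * Called when the inode is being torn down. If a dirty atime is
 * still pending and the file was not unlinked, write the atime back
 * to the SA before releasing the in-core znode.
 */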
4019 /*ARGSUSED*/
4020 void
4021 zfs_inactive(struct inode *ip)
4022 {
4023 znode_t *zp = ITOZ(ip);
4024 zfs_sb_t *zsb = ITOZSB(ip);
4025 int error;
4026
4027 if (zfsctl_is_node(ip)) {
4028 zfsctl_inode_inactive(ip);
4029 return;
4030 }
4031
4032 rw_enter(&zsb->z_teardown_inactive_lock, RW_READER);
4033 if (zp->z_sa_hdl == NULL) {
4034 rw_exit(&zsb->z_teardown_inactive_lock);
4035 return;
4036 }
4037
4038 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4039 dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
4040
4041 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4042 zfs_sa_upgrade_txholds(tx, zp);
4043 error = dmu_tx_assign(tx, TXG_WAIT);
4044 if (error) {
4045 dmu_tx_abort(tx);
4046 } else {
4047 mutex_enter(&zp->z_lock);
4048 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
4049 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4050 zp->z_atime_dirty = 0;
4051 mutex_exit(&zp->z_lock);
4052 dmu_tx_commit(tx);
4053 }
4054 }
4055
4056 zfs_zinactive(zp);
4057 rw_exit(&zsb->z_teardown_inactive_lock);
4058 }
4059 EXPORT_SYMBOL(zfs_inactive);
4060
4061 /*
4062 * Bounds-check the seek operation.
4063 *
4064 * IN: ip - inode seeking within
4065 * ooff - old file offset
4066 * noffp - pointer to new file offset
4067 * ct - caller context
4068 *
4069 * RETURN: 0 if success
4070 * EINVAL if new offset invalid
4071 */
4072 /* ARGSUSED */
4073 int
4074 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4075 {
4076 if (S_ISDIR(ip->i_mode))
4077 return (0);
4078 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4079 }
4080 EXPORT_SYMBOL(zfs_seek);
4081
4082 /*
4083 * Fill pages with data from the disk.
4084 */
4085 static int
4086 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4087 {
4088 znode_t *zp = ITOZ(ip);
4089 zfs_sb_t *zsb = ITOZSB(ip);
4090 objset_t *os;
4091 struct page *cur_pp;
4092 u_offset_t io_off, total;
4093 size_t io_len;
4094 loff_t i_size;
4095 unsigned page_idx;
4096 int err;
4097
4098 os = zsb->z_os;
4099 io_len = nr_pages << PAGE_CACHE_SHIFT;
4100 i_size = i_size_read(ip);
4101 io_off = page_offset(pl[0]);
4102
4103 if (io_off + io_len > i_size)
4104 io_len = i_size - io_off;
4105
4106 /*
4107 * Iterate over list of pages and read each page individually.
4108 */
4109 page_idx = 0;
4110 cur_pp = pl[0];
4111 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4112 caddr_t va;
4113
4114 va = kmap(cur_pp);
4115 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4116 DMU_READ_PREFETCH);
4117 kunmap(cur_pp);
4118 if (err) {
4119 /* convert checksum errors into IO errors */
4120 if (err == ECKSUM)
4121 err = SET_ERROR(EIO);
4122 return (err);
4123 }
4124 cur_pp = pl[++page_idx];
4125 }
4126
4127 return (0);
4128 }
4129
4130 /*
4131 * Uses zfs_fillpage to read data from the file and fill the pages.
4132 *
4133 * IN: ip - inode of file to get data from.
4134 * pl - list of pages to read
4135 * nr_pages - number of pages to read
4136 *
4137 * RETURN: 0 on success, error code on failure.
4138 *
4139 * Timestamps:
4140 * ip - atime updated
4141 */
4142 /* ARGSUSED */
4143 int
4144 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4145 {
4146 znode_t *zp = ITOZ(ip);
4147 zfs_sb_t *zsb = ITOZSB(ip);
4148 int err;
4149
4150 if (pl == NULL)
4151 return (0);
4152
4153 ZFS_ENTER(zsb);
4154 ZFS_VERIFY_ZP(zp);
4155
4156 err = zfs_fillpage(ip, pl, nr_pages);
4157
4158 if (!err)
4159 ZFS_ACCESSTIME_STAMP(zsb, zp);
4160
4161 ZFS_EXIT(zsb);
4162 return (err);
4163 }
4164 EXPORT_SYMBOL(zfs_getpage);
4165
4166 /*
4167 * Check ZFS specific permissions to memory map a section of a file.
4168 *
4169 * IN: ip - inode of the file to mmap
4170 * off - file offset
4171 * addrp - start address in memory region
4172 * len - length of memory region
4173 * vm_flags - address flags
4174 *
4175 * RETURN: 0 if success
4176 * error code if failure
4177 */
4178 /*ARGSUSED*/
4179 int
4180 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4181 unsigned long vm_flags)
4182 {
4183 znode_t *zp = ITOZ(ip);
4184 zfs_sb_t *zsb = ITOZSB(ip);
4185
4186 ZFS_ENTER(zsb);
4187 ZFS_VERIFY_ZP(zp);
4188
4189 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4190 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4191 ZFS_EXIT(zsb);
4192 return (SET_ERROR(EPERM));
4193 }
4194
4195 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4196 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4197 ZFS_EXIT(zsb);
4198 return (SET_ERROR(EACCES));
4199 }
4200
4201 if (off < 0 || len > MAXOFFSET_T - off) {
4202 ZFS_EXIT(zsb);
4203 return (SET_ERROR(ENXIO));
4204 }
4205
4206 ZFS_EXIT(zsb);
4207 return (0);
4208 }
4209 EXPORT_SYMBOL(zfs_map);
4210
4211 /*
4212 * convoff - convert the (l_start, l_whence) pair in lckdat to be
4213 * relative to the given whence (0 = file start, 1 = offset, 2 = EOF).
4214 */
4215 int
4216 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4217 {
4218 vattr_t vap;
4219 int error;
4220
4221 if ((lckdat->l_whence == 2) || (whence == 2)) {
4222 if ((error = zfs_getattr(ip, &vap, 0, CRED())) != 0)
4223 return (error);
4224 }
4225
4226 switch (lckdat->l_whence) {
4227 case 1:
4228 lckdat->l_start += offset;
4229 break;
4230 case 2:
4231 lckdat->l_start += vap.va_size;
4232 /* FALLTHRU */
4233 case 0:
4234 break;
4235 default:
4236 return (SET_ERROR(EINVAL));
4237 }
4238
4239 if (lckdat->l_start < 0)
4240 return (SET_ERROR(EINVAL));
4241
4242 switch (whence) {
4243 case 1:
4244 lckdat->l_start -= offset;
4245 break;
4246 case 2:
4247 lckdat->l_start -= vap.va_size;
4248 /* FALLTHRU */
4249 case 0:
4250 break;
4251 default:
4252 return (SET_ERROR(EINVAL));
4253 }
4254
4255 lckdat->l_whence = (short)whence;
4256 return (0);
4257 }
4258
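/*
 * Usage sketch (not taken from an actual caller): normalize a lock
 * request expressed relative to end-of-file into an absolute
 * (SEEK_SET style) offset before comparing lock ranges:
 *
 *	flock64_t fl;
 *	fl.l_whence = 2;		// 2: relative to end of file
 *	fl.l_start = -4096;		// the last 4K of the file
 *	fl.l_len = 4096;
 *	error = convoff(ip, &fl, 0, 0);	// convert to whence 0 (absolute)
 *	// on success: fl.l_whence == 0, fl.l_start == file size - 4096
 *
 * The offset argument only matters when either whence is 1 (relative
 * to the current file offset).
 */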
4259 /*
4260 * Free or allocate space in a file. Currently, this function only
4261 * supports the `F_FREESP' command. However, this command is somewhat
4262 * misnamed, as its functionality includes the ability to allocate as
4263 * well as free space.
4264 *
4265 * IN: ip - inode of file to free data in.
4266 * cmd - action to take (only F_FREESP supported).
4267 * bfp - section of file to free/alloc.
4268 * flag - current file open mode flags.
4269 * offset - current file offset.
4270 * cr - credentials of caller [UNUSED].
4271 *
4272 * RETURN: 0 on success, error code on failure.
4273 *
4274 * Timestamps:
4275 * ip - ctime|mtime updated
4276 */
4277 /* ARGSUSED */
4278 int
4279 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4280 offset_t offset, cred_t *cr)
4281 {
4282 znode_t *zp = ITOZ(ip);
4283 zfs_sb_t *zsb = ITOZSB(ip);
4284 uint64_t off, len;
4285 int error;
4286
4287 ZFS_ENTER(zsb);
4288 ZFS_VERIFY_ZP(zp);
4289
4290 if (cmd != F_FREESP) {
4291 ZFS_EXIT(zsb);
4292 return (SET_ERROR(EINVAL));
4293 }
4294
4295 if ((error = convoff(ip, bfp, 0, offset))) {
4296 ZFS_EXIT(zsb);
4297 return (error);
4298 }
4299
4300 if (bfp->l_len < 0) {
4301 ZFS_EXIT(zsb);
4302 return (SET_ERROR(EINVAL));
4303 }
4304
4305 /*
4306 * Permissions aren't checked on Solaris because on this OS
4307 * zfs_space() can only be called with an opened file handle.
4308 * On Linux we can get here through truncate_range() which
4309 * operates directly on inodes, so we need to check access rights.
4310 */
4311 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4312 ZFS_EXIT(zsb);
4313 return (error);
4314 }
4315
4316 off = bfp->l_start;
4317 len = bfp->l_len; /* 0 means from off to end of file */
4318
4319 error = zfs_freesp(zp, off, len, flag, TRUE);
4320
4321 ZFS_EXIT(zsb);
4322 return (error);
4323 }
4324 EXPORT_SYMBOL(zfs_space);
4325
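/*
 * Usage sketch (hypothetical values): punch a hole over the byte range
 * [64K, 192K) of a file, roughly what the ZPL does when translating
 * fallocate(FALLOC_FL_PUNCH_HOLE):
 *
 *	flock64_t bf;
 *	bf.l_whence = 0;	// l_start is an absolute offset
 *	bf.l_start = 65536;
 *	bf.l_len = 131072;	// 0 would mean "to end of file"
 *	error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, CRED());
 */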
4326 /*ARGSUSED*/
4327 int
4328 zfs_fid(struct inode *ip, fid_t *fidp)
4329 {
4330 znode_t *zp = ITOZ(ip);
4331 zfs_sb_t *zsb = ITOZSB(ip);
4332 uint32_t gen;
4333 uint64_t gen64;
4334 uint64_t object = zp->z_id;
4335 zfid_short_t *zfid;
4336 int size, i, error;
4337
4338 ZFS_ENTER(zsb);
4339 ZFS_VERIFY_ZP(zp);
4340
4341 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
4342 &gen64, sizeof (uint64_t))) != 0) {
4343 ZFS_EXIT(zsb);
4344 return (error);
4345 }
4346
4347 gen = (uint32_t)gen64;
4348
4349 size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
4350 if (fidp->fid_len < size) {
4351 fidp->fid_len = size;
4352 ZFS_EXIT(zsb);
4353 return (SET_ERROR(ENOSPC));
4354 }
4355
4356 zfid = (zfid_short_t *)fidp;
4357
4358 zfid->zf_len = size;
4359
4360 for (i = 0; i < sizeof (zfid->zf_object); i++)
4361 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4362
4363 /* Must have a non-zero generation number to distinguish from .zfs */
4364 if (gen == 0)
4365 gen = 1;
4366 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4367 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4368
4369 if (size == LONG_FID_LEN) {
4370 uint64_t objsetid = dmu_objset_id(zsb->z_os);
4371 zfid_long_t *zlfid;
4372
4373 zlfid = (zfid_long_t *)fidp;
4374
4375 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4376 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4377
4378 /* XXX - this should be the generation number for the objset */
4379 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4380 zlfid->zf_setgen[i] = 0;
4381 }
4382
4383 ZFS_EXIT(zsb);
4384 return (0);
4385 }
4386 EXPORT_SYMBOL(zfs_fid);
4387
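/*
 * The object and generation numbers above are packed one byte at a
 * time, least significant byte first. A consumer of the fid (for
 * example an NFS file handle decoder) reverses the packing like this
 * (sketch):
 *
 *	int i;
 *	uint64_t object = 0;
 *	uint32_t gen = 0;
 *
 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
 *		object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
 *	for (i = 0; i < sizeof (zfid->zf_gen); i++)
 *		gen |= ((uint32_t)zfid->zf_gen[i]) << (8 * i);
 */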
4388 /*ARGSUSED*/
4389 int
4390 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4391 {
4392 znode_t *zp = ITOZ(ip);
4393 zfs_sb_t *zsb = ITOZSB(ip);
4394 int error;
4395 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4396
4397 ZFS_ENTER(zsb);
4398 ZFS_VERIFY_ZP(zp);
4399 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4400 ZFS_EXIT(zsb);
4401
4402 return (error);
4403 }
4404 EXPORT_SYMBOL(zfs_getsecattr);
4405
4406 /*ARGSUSED*/
4407 int
4408 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4409 {
4410 znode_t *zp = ITOZ(ip);
4411 zfs_sb_t *zsb = ITOZSB(ip);
4412 int error;
4413 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4414 zilog_t *zilog = zsb->z_log;
4415
4416 ZFS_ENTER(zsb);
4417 ZFS_VERIFY_ZP(zp);
4418
4419 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4420
4421 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
4422 zil_commit(zilog, 0);
4423
4424 ZFS_EXIT(zsb);
4425 return (error);
4426 }
4427 EXPORT_SYMBOL(zfs_setsecattr);
4428
4429 #ifdef HAVE_UIO_ZEROCOPY
4430 /*
4431 * Tunables; both must be a power of 2.
4432 *
4433 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4434 * zcr_blksz_max: if set to less than the file block size, allow loaning out
4435 * an arcbuf for a partial block read
4436 */
4437 int zcr_blksz_min = (1 << 10); /* 1K */
4438 int zcr_blksz_max = (1 << 17); /* 128K */
4439
4440 /*ARGSUSED*/
4441 static int
4442 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4443 {
4444 znode_t *zp = ITOZ(ip);
4445 zfs_sb_t *zsb = ITOZSB(ip);
4446 int max_blksz = zsb->z_max_blksz;
4447 uio_t *uio = &xuio->xu_uio;
4448 ssize_t size = uio->uio_resid;
4449 offset_t offset = uio->uio_loffset;
4450 int blksz;
4451 int fullblk, i;
4452 arc_buf_t *abuf;
4453 ssize_t maxsize;
4454 int preamble, postamble;
4455
4456 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4457 return (SET_ERROR(EINVAL));
4458
4459 ZFS_ENTER(zsb);
4460 ZFS_VERIFY_ZP(zp);
4461 switch (ioflag) {
4462 case UIO_WRITE:
4463 /*
4464 * Loan out an arc_buf for write if the write size is at least
4465 * max_blksz and the file's block size is also max_blksz.
4466 */
4467 blksz = max_blksz;
4468 if (size < blksz || zp->z_blksz != blksz) {
4469 ZFS_EXIT(zsb);
4470 return (SET_ERROR(EINVAL));
4471 }
4472 /*
4473 * Caller requests buffers for write before knowing where the
4474 * write offset might be (e.g. NFS TCP write).
4475 */
4476 if (offset == -1) {
4477 preamble = 0;
4478 } else {
4479 preamble = P2PHASE(offset, blksz);
4480 if (preamble) {
4481 preamble = blksz - preamble;
4482 size -= preamble;
4483 }
4484 }
4485
4486 postamble = P2PHASE(size, blksz);
4487 size -= postamble;
4488
4489 fullblk = size / blksz;
4490 (void) dmu_xuio_init(xuio,
4491 (preamble != 0) + fullblk + (postamble != 0));
4492
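/*
 * Worked example (hypothetical sizes, blksz == 128K): a 300K write at
 * offset 96K first carves off a preamble of 128K - 96K = 32K, leaving
 * size = 268K; the postamble is then 268K % 128K = 12K, leaving
 * size = 256K, so fullblk == 2 and dmu_xuio_init() is asked for
 * 1 + 2 + 1 = 4 arc_bufs.
 */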
4493 /*
4494 * The iov base/len of partial buffers must be fixed up; as
4495 * created they describe full arc_bufs.
4496 */
4497 if (preamble) {
4498 /* data begins in the middle of the arc_buf */
4499 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4500 blksz);
4501 ASSERT(abuf);
4502 (void) dmu_xuio_add(xuio, abuf,
4503 blksz - preamble, preamble);
4504 }
4505
4506 for (i = 0; i < fullblk; i++) {
4507 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4508 blksz);
4509 ASSERT(abuf);
4510 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4511 }
4512
4513 if (postamble) {
4514 /* data ends in the middle of the arc_buf */
4515 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4516 blksz);
4517 ASSERT(abuf);
4518 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4519 }
4520 break;
4521 case UIO_READ:
4522 /*
4523 * Loan out an arc_buf for read if the read size is larger than
4524 * the current file block size. Block alignment is not
4525 * considered; a partial arc_buf may be loaned out for the read.
4526 */
4527 blksz = zp->z_blksz;
4528 if (blksz < zcr_blksz_min)
4529 blksz = zcr_blksz_min;
4530 if (blksz > zcr_blksz_max)
4531 blksz = zcr_blksz_max;
4532 /* avoid the extra complexity of blocks larger than max_blksz */
4533 if (blksz > max_blksz) {
4534 ZFS_EXIT(zsb);
4535 return (SET_ERROR(EINVAL));
4536 }
4537
4538 maxsize = zp->z_size - uio->uio_loffset;
4539 if (size > maxsize)
4540 size = maxsize;
4541
4542 if (size < blksz) {
4543 ZFS_EXIT(zsb);
4544 return (SET_ERROR(EINVAL));
4545 }
4546 break;
4547 default:
4548 ZFS_EXIT(zsb);
4549 return (SET_ERROR(EINVAL));
4550 }
4551
4552 uio->uio_extflg = UIO_XUIO;
4553 XUIO_XUZC_RW(xuio) = ioflag;
4554 ZFS_EXIT(zsb);
4555 return (0);
4556 }
4557
4558 /*ARGSUSED*/
4559 static int
4560 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4561 {
4562 int i;
4563 arc_buf_t *abuf;
4564 int ioflag = XUIO_XUZC_RW(xuio);
4565
4566 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4567
4568 i = dmu_xuio_cnt(xuio);
4569 while (i-- > 0) {
4570 abuf = dmu_xuio_arcbuf(xuio, i);
4571 /*
4572 * If abuf == NULL, it must be a write buffer that has already
4573 * been returned (consumed) in zfs_write().
4574 */
4575 if (abuf)
4576 dmu_return_arcbuf(abuf);
4577 ASSERT(abuf || ioflag == UIO_WRITE);
4578 }
4579
4580 dmu_xuio_fini(xuio);
4581 return (0);
4582 }
4583 #endif /* HAVE_UIO_ZEROCOPY */
4584
4585 #if defined(_KERNEL) && defined(HAVE_SPL)
4586 module_param(zfs_read_chunk_size, long, 0644);
4587 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
4588 #endif
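/*
 * With the standard module parameter layout, zfs_read_chunk_size can be
 * inspected and tuned at runtime through sysfs, e.g.:
 *
 *	# cat /sys/module/zfs/parameters/zfs_read_chunk_size
 *	# echo 2097152 > /sys/module/zfs/parameters/zfs_read_chunk_size
 */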