1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
25 * Copyright 2017 Nexenta Systems, Inc.
26 */
27
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/time.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/vfs.h>
35 #include <sys/vnode.h>
36 #include <sys/file.h>
37 #include <sys/mode.h>
38 #include <sys/kmem.h>
39 #include <sys/uio.h>
40 #include <sys/pathname.h>
41 #include <sys/cmn_err.h>
42 #include <sys/errno.h>
43 #include <sys/stat.h>
44 #include <sys/unistd.h>
45 #include <sys/sunddi.h>
46 #include <sys/random.h>
47 #include <sys/policy.h>
48 #include <sys/zfs_dir.h>
49 #include <sys/zfs_acl.h>
50 #include <sys/zfs_vnops.h>
51 #include <sys/fs/zfs.h>
52 #include "fs/fs_subr.h"
53 #include <sys/zap.h>
54 #include <sys/dmu.h>
55 #include <sys/atomic.h>
56 #include <sys/zfs_ctldir.h>
57 #include <sys/zfs_fuid.h>
58 #include <sys/sa.h>
59 #include <sys/zfs_sa.h>
60 #include <sys/dnlc.h>
61 #include <sys/extdirent.h>
62
63 /*
64 * zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups
65 * of names after deciding which is the appropriate lookup interface.
66 */
67 static int
68 zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, matchtype_t mt,
69 boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid)
70 {
71 boolean_t conflict = B_FALSE;
72 int error;
73
74 if (zfsvfs->z_norm) {
75 size_t bufsz = 0;
76 char *buf = NULL;
77
78 if (rpnp) {
79 buf = rpnp->pn_buf;
80 bufsz = rpnp->pn_bufsize;
81 }
82
83 /*
84 * In the non-mixed case we expect there will only ever
85 * be one match, but we still need to use the normalizing lookup.
86 */
87 error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1,
88 zoid, mt, buf, bufsz, &conflict);
89 } else {
90 error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid);
91 }
92
93 /*
94 * Allow multiple entries provided the first entry is
95 * the object id. Non-zpl consumers may safely make
96 * use of the additional space.
97 *
98 * XXX: This should be a feature flag for compatibility
99 */
100 if (error == EOVERFLOW)
101 error = 0;
102
103 if (zfsvfs->z_norm && !error && deflags)
104 *deflags = conflict ? ED_CASE_CONFLICT : 0;
105
106 *zoid = ZFS_DIRENT_OBJ(*zoid);
107
108 #ifdef HAVE_DNLC
109 if (error == ENOENT && update)
110 dnlc_update(ZTOI(dzp), name, DNLC_NO_VNODE);
111 #endif /* HAVE_DNLC */
112
113 return (error);
114 }
115
116 /*
117 * Lock a directory entry. A dirlock on <dzp, name> protects that name
118 * in dzp's directory zap object. As long as you hold a dirlock, you can
119 * assume two things: (1) dzp cannot be reaped, and (2) no other thread
120 * can change the zap entry for (i.e. link or unlink) this name.
121 *
122 * Input arguments:
123 * dzp - znode for directory
124 * name - name of entry to lock
125 * flag - ZNEW: if the entry already exists, fail with EEXIST.
126 * ZEXISTS: if the entry does not exist, fail with ENOENT.
127 * ZSHARED: allow concurrent access with other ZSHARED callers.
128 * ZXATTR: we want dzp's xattr directory
129 * ZCILOOK: On a mixed sensitivity file system,
130 * this lookup should be case-insensitive.
131 * ZCIEXACT: On a purely case-insensitive file system,
132 * this lookup should be case-sensitive.
133 * ZRENAMING: we are locking for renaming, force narrow locks
134 * ZHAVELOCK: Don't grab the z_name_lock for this call. The
135 * current thread already holds it.
136 *
137 * Output arguments:
138 * zpp - pointer to the znode for the entry (NULL if there isn't one)
139 * dlpp - pointer to the dirlock for this entry (NULL on error)
140 * direntflags - (case-insensitive lookup only)
141 * flags if multiple case-sensitive matches exist in directory
142 * realpnp - (case-insensitive lookup only)
143 * actual name matched within the directory
144 *
145 * Return value: 0 on success or errno on failure.
146 *
147 * NOTE: Always checks for, and rejects, '.' and '..'.
148 * NOTE: For case-insensitive file systems we take wide locks (see below),
149 * but return znode pointers to a single match.
150 */
151 int
152 zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
153 int flag, int *direntflags, pathname_t *realpnp)
154 {
155 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
156 zfs_dirlock_t *dl;
157 boolean_t update;
158 matchtype_t mt = 0;
159 uint64_t zoid;
160 #ifdef HAVE_DNLC
161 vnode_t *vp = NULL;
162 #endif /* HAVE_DNLC */
163 int error = 0;
164 int cmpflags;
165
166 *zpp = NULL;
167 *dlpp = NULL;
168
169 /*
170 * Verify that we are not trying to lock '.', '..', or '.zfs'
171 */
172 if ((name[0] == '.' &&
173 (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))) ||
174 (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0))
175 return (SET_ERROR(EEXIST));
176
177 /*
178 * Case sensitivity and normalization preferences are set when
179 * the file system is created. These are stored in the
180 * zfsvfs->z_case and zfsvfs->z_norm fields. These choices
181 * affect what vnodes can be cached in the DNLC, how we
182 * perform zap lookups, and the "width" of our dirlocks.
183 *
184 * A normal dirlock locks a single name. Note that with
185 * normalization a name can be composed multiple ways, but
186 * when normalized, these names all compare equal. A wide
187 * dirlock locks multiple names. We need these when the file
188 * system supports mixed-mode access. It is sometimes
189 * necessary to lock all case permutations of a file name at
190 * once so that simultaneous case-insensitive and case-sensitive
191 * access behaves as rationally as possible.
192 */
193
194 /*
195 * When matching we may need to normalize & change case according to
196 * FS settings.
197 *
198 * Note that a normalized match is necessary for a case-insensitive
199 * filesystem when the lookup request is not exact, because normalization
200 * can fold case independently of normalizing code point sequences.
201 *
202 * See the table above zfs_dropname().
203 */
204 if (zfsvfs->z_norm != 0) {
205 mt = MT_NORMALIZE;
206
207 /*
208 * Determine if the match needs to honor the case specified in
209 * lookup, and if so keep track of that so that during
210 * normalization we don't fold case.
211 */
212 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE &&
213 (flag & ZCIEXACT)) ||
214 (zfsvfs->z_case == ZFS_CASE_MIXED && !(flag & ZCILOOK))) {
215 mt |= MT_MATCH_CASE;
216 }
217 }
218
219 /*
220 * Only look in or update the DNLC if we are looking for the
221 * name on a file system that does not require normalization
222 * or case folding. We can also look there if we happen to be
223 * on a non-normalizing, mixed sensitivity file system IF we
224 * are looking for the exact name.
225 *
226 * Maybe we could add a TO-UPPERed version of the name to the dnlc
227 * in the ci-only case as a performance improvement?
228 */
229 update = !zfsvfs->z_norm ||
230 (zfsvfs->z_case == ZFS_CASE_MIXED &&
231 !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));
232
233 /*
234 * ZRENAMING indicates we are in a situation where we should
235 * take narrow locks regardless of the file system's
236 * preferences for normalizing and case folding. This will
237 * prevent us from deadlocking trying to grab the same wide lock
238 * twice if the two names happen to be case-insensitive
239 * matches.
240 */
241 if (flag & ZRENAMING)
242 cmpflags = 0;
243 else
244 cmpflags = zfsvfs->z_norm;
245
246 /*
247 * Wait until there are no locks on this name.
248 *
249 * Don't grab the lock if it is already held. However, we cannot
250 * have both ZSHARED and ZHAVELOCK together.
251 */
252 ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK));
253 if (!(flag & ZHAVELOCK))
254 rw_enter(&dzp->z_name_lock, RW_READER);
255
256 mutex_enter(&dzp->z_lock);
257 for (;;) {
258 if (dzp->z_unlinked && !(flag & ZXATTR)) {
259 mutex_exit(&dzp->z_lock);
260 if (!(flag & ZHAVELOCK))
261 rw_exit(&dzp->z_name_lock);
262 return (SET_ERROR(ENOENT));
263 }
264 for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) {
265 if ((u8_strcmp(name, dl->dl_name, 0, cmpflags,
266 U8_UNICODE_LATEST, &error) == 0) || error != 0)
267 break;
268 }
269 if (error != 0) {
270 mutex_exit(&dzp->z_lock);
271 if (!(flag & ZHAVELOCK))
272 rw_exit(&dzp->z_name_lock);
273 return (SET_ERROR(ENOENT));
274 }
275 if (dl == NULL) {
276 /*
277 * Allocate a new dirlock and add it to the list.
278 */
279 dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP);
280 cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
281 dl->dl_name = name;
282 dl->dl_sharecnt = 0;
283 dl->dl_namelock = 0;
284 dl->dl_namesize = 0;
285 dl->dl_dzp = dzp;
286 dl->dl_next = dzp->z_dirlocks;
287 dzp->z_dirlocks = dl;
288 break;
289 }
290 if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
291 break;
292 cv_wait(&dl->dl_cv, &dzp->z_lock);
293 }
294
295 /*
296 * If we did not take the z_name_lock for this dirlock (ZHAVELOCK), record that so zfs_dirent_unlock() won't drop it.
297 */
298 if (flag & ZHAVELOCK)
299 dl->dl_namelock = 1;
300
301 if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) {
302 /*
303 * We're the second shared reference to dl. Make a copy of
304 * dl_name in case the first thread goes away before we do.
305 * Note that we initialize the new name before storing its
306 * pointer into dl_name, because the first thread may load
307 * dl->dl_name at any time. It'll either see the old value,
308 * which belongs to it, or the new shared copy; either is OK.
309 */
310 dl->dl_namesize = strlen(dl->dl_name) + 1;
311 name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
312 bcopy(dl->dl_name, name, dl->dl_namesize);
313 dl->dl_name = name;
314 }
315
316 mutex_exit(&dzp->z_lock);
317
318 /*
319 * We have a dirlock on the name. (Note that it is the dirlock,
320 * not the dzp's z_lock, that protects the name in the zap object.)
321 * See if there's an object by this name; if so, put a hold on it.
322 */
323 if (flag & ZXATTR) {
324 error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
325 sizeof (zoid));
326 if (error == 0)
327 error = (zoid == 0 ? SET_ERROR(ENOENT) : 0);
328 } else {
329 #ifdef HAVE_DNLC
330 if (update)
331 vp = dnlc_lookup(ZTOI(dzp), name);
332 if (vp == DNLC_NO_VNODE) {
333 iput(vp);
334 error = SET_ERROR(ENOENT);
335 } else if (vp) {
336 if (flag & ZNEW) {
337 zfs_dirent_unlock(dl);
338 iput(vp);
339 return (SET_ERROR(EEXIST));
340 }
341 *dlpp = dl;
342 *zpp = VTOZ(vp);
343 return (0);
344 } else {
345 error = zfs_match_find(zfsvfs, dzp, name, mt,
346 update, direntflags, realpnp, &zoid);
347 }
348 #else
349 error = zfs_match_find(zfsvfs, dzp, name, mt,
350 update, direntflags, realpnp, &zoid);
351 #endif /* HAVE_DNLC */
352 }
353 if (error) {
354 if (error != ENOENT || (flag & ZEXISTS)) {
355 zfs_dirent_unlock(dl);
356 return (error);
357 }
358 } else {
359 if (flag & ZNEW) {
360 zfs_dirent_unlock(dl);
361 return (SET_ERROR(EEXIST));
362 }
363 error = zfs_zget(zfsvfs, zoid, zpp);
364 if (error) {
365 zfs_dirent_unlock(dl);
366 return (error);
367 }
368 #ifdef HAVE_DNLC
369 if (!(flag & ZXATTR) && update)
370 dnlc_update(ZTOI(dzp), name, ZTOI(*zpp));
371 #endif /* HAVE_DNLC */
372 }
373
374 *dlpp = dl;
375
376 return (0);
377 }
378
379 /*
380 * Unlock this directory entry and wake anyone who was waiting for it.
381 */
382 void
383 zfs_dirent_unlock(zfs_dirlock_t *dl)
384 {
385 znode_t *dzp = dl->dl_dzp;
386 zfs_dirlock_t **prev_dl, *cur_dl;
387
388 mutex_enter(&dzp->z_lock);
389
390 if (!dl->dl_namelock)
391 rw_exit(&dzp->z_name_lock);
392
393 if (dl->dl_sharecnt > 1) {
394 dl->dl_sharecnt--;
395 mutex_exit(&dzp->z_lock);
396 return;
397 }
398 prev_dl = &dzp->z_dirlocks;
399 while ((cur_dl = *prev_dl) != dl)
400 prev_dl = &cur_dl->dl_next;
401 *prev_dl = dl->dl_next;
402 cv_broadcast(&dl->dl_cv);
403 mutex_exit(&dzp->z_lock);
404
405 if (dl->dl_namesize != 0)
406 kmem_free(dl->dl_name, dl->dl_namesize);
407 cv_destroy(&dl->dl_cv);
408 kmem_free(dl, sizeof (*dl));
409 }
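
/*
 * Illustrative only (added example, not part of the original source):
 * a minimal sketch of the typical caller pattern for zfs_dirent_lock()
 * and zfs_dirent_unlock().  The helper name and error handling are
 * hypothetical; see zfs_dirlook() below for a real caller.
 */
#if 0
static int
example_dirent_lookup(znode_t *dzp, char *name, znode_t **zpp)
{
	zfs_dirlock_t *dl;
	int error;

	/*
	 * ZEXISTS: fail with ENOENT if the entry does not exist.
	 * ZSHARED: allow other concurrent ZSHARED lockers of this name.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, zpp, ZEXISTS | ZSHARED,
	    NULL, NULL);
	if (error != 0)
		return (error);

	/*
	 * While the dirlock is held the name cannot be linked or
	 * unlinked, and *zpp holds a referenced znode from zfs_zget().
	 */
	zfs_dirent_unlock(dl);

	/*
	 * The znode hold remains with the caller; it is eventually
	 * dropped, e.g. with iput(ZTOI(*zpp)), when no longer needed.
	 */
	return (0);
}
#endif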
410
411 /*
412 * Look up an entry in a directory.
413 *
414 * NOTE: '.' and '..' are handled as special cases because
415 * no directory entries are actually stored for them. If this is
416 * the root of a filesystem, then '.zfs' is also treated as a
417 * special pseudo-directory.
418 */
419 int
420 zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags,
421 int *deflg, pathname_t *rpnp)
422 {
423 zfs_dirlock_t *dl;
424 znode_t *zp;
425 int error = 0;
426 uint64_t parent;
427
428 if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
429 *ipp = ZTOI(dzp);
430 igrab(*ipp);
431 } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
432 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
433
434 /*
435 * If we are a snapshot mounted under .zfs, return
436 * the inode pointer for the snapshot directory.
437 */
438 if ((error = sa_lookup(dzp->z_sa_hdl,
439 SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
440 return (error);
441
442 if (parent == dzp->z_id && zfsvfs->z_parent != zfsvfs) {
443 error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir,
444 "snapshot", ipp, 0, kcred, NULL, NULL);
445 return (error);
446 }
447 rw_enter(&dzp->z_parent_lock, RW_READER);
448 error = zfs_zget(zfsvfs, parent, &zp);
449 if (error == 0)
450 *ipp = ZTOI(zp);
451 rw_exit(&dzp->z_parent_lock);
452 } else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) {
453 *ipp = zfsctl_root(dzp);
454 } else {
455 int zf;
456
457 zf = ZEXISTS | ZSHARED;
458 if (flags & FIGNORECASE)
459 zf |= ZCILOOK;
460
461 error = zfs_dirent_lock(&dl, dzp, name, &zp, zf, deflg, rpnp);
462 if (error == 0) {
463 *ipp = ZTOI(zp);
464 zfs_dirent_unlock(dl);
465 dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
466 }
467 rpnp = NULL;
468 }
469
470 if ((flags & FIGNORECASE) && rpnp && !error)
471 (void) strlcpy(rpnp->pn_buf, name, rpnp->pn_bufsize);
472
473 return (error);
474 }
475
476 /*
477 * unlinked Set (formerly known as the "delete queue") Error Handling
478 *
479 * When dealing with the unlinked set, we dmu_tx_hold_zap(), but we
480 * don't specify the name of the entry that we will be manipulating. We
481 * also fib and say that we won't be adding any new entries to the
482 * unlinked set, even though we might (this is to lower the minimum file
483 * size that can be deleted in a full filesystem). So on the small
484 * chance that the nlink list is using a fat zap (i.e. has more than
485 * 2000 entries), we *may* not pre-read a block that's needed.
486 * Therefore it is remotely possible for some of the assertions
487 * regarding the unlinked set below to fail due to i/o error. On a
488 * nondebug system, this will result in the space being leaked.
489 */
490 void
491 zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
492 {
493 zfsvfs_t *zfsvfs = ZTOZSB(zp);
494
495 ASSERT(zp->z_unlinked);
496 ASSERT(ZTOI(zp)->i_nlink == 0);
497
498 VERIFY3U(0, ==,
499 zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
500 }
501
502 /*
503 * Clean up any znodes that had no links when we either crashed or
504 * (force) umounted the file system.
505 */
506 void
507 zfs_unlinked_drain(zfsvfs_t *zfsvfs)
508 {
509 zap_cursor_t zc;
510 zap_attribute_t zap;
511 dmu_object_info_t doi;
512 znode_t *zp;
513 int error;
514
515 /*
516 * Iterate over the contents of the unlinked set.
517 */
518 for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
519 zap_cursor_retrieve(&zc, &zap) == 0;
520 zap_cursor_advance(&zc)) {
521
522 /*
523 * See what kind of object we have in the list
524 */
525
526 error = dmu_object_info(zfsvfs->z_os,
527 zap.za_first_integer, &doi);
528 if (error != 0)
529 continue;
530
531 ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
532 (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
533 /*
534 * We need to re-mark these list entries for deletion,
535 * so we pull them back into core and set zp->z_unlinked.
536 */
537 error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);
538
539 /*
540 * We may pick up znodes that are already marked for deletion.
541 * This could happen during the purge of an extended attribute
542 * directory. All we need to do is skip over them, since they
543 * are already in the system marked z_unlinked.
544 */
545 if (error != 0)
546 continue;
547
548 zp->z_unlinked = B_TRUE;
549 iput(ZTOI(zp));
550 }
551 zap_cursor_fini(&zc);
552 }
553
554 /*
555 * Delete the entire contents of a directory. Return a count
556 * of the number of entries that could not be deleted. If we encounter
557 * an error, return a count of at least one so that the directory stays
558 * in the unlinked set.
559 *
560 * NOTE: this function assumes that the directory is inactive,
561 * so there is no need to lock its entries before deletion.
562 * Also, it assumes the directory contents are *only* regular
563 * files.
564 */
565 static int
566 zfs_purgedir(znode_t *dzp)
567 {
568 zap_cursor_t zc;
569 zap_attribute_t zap;
570 znode_t *xzp;
571 dmu_tx_t *tx;
572 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
573 zfs_dirlock_t dl;
574 int skipped = 0;
575 int error;
576
577 for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
578 (error = zap_cursor_retrieve(&zc, &zap)) == 0;
579 zap_cursor_advance(&zc)) {
580 error = zfs_zget(zfsvfs,
581 ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
582 if (error) {
583 skipped += 1;
584 continue;
585 }
586
587 ASSERT(S_ISREG(ZTOI(xzp)->i_mode) ||
588 S_ISLNK(ZTOI(xzp)->i_mode));
589
590 tx = dmu_tx_create(zfsvfs->z_os);
591 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
592 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
593 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
594 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
595 /* Is this really needed ? */
596 zfs_sa_upgrade_txholds(tx, xzp);
597 dmu_tx_mark_netfree(tx);
598 error = dmu_tx_assign(tx, TXG_WAIT);
599 if (error) {
600 dmu_tx_abort(tx);
601 zfs_iput_async(ZTOI(xzp));
602 skipped += 1;
603 continue;
604 }
605 bzero(&dl, sizeof (dl));
606 dl.dl_dzp = dzp;
607 dl.dl_name = zap.za_name;
608
609 error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
610 if (error)
611 skipped += 1;
612 dmu_tx_commit(tx);
613
614 zfs_iput_async(ZTOI(xzp));
615 }
616 zap_cursor_fini(&zc);
617 if (error != ENOENT)
618 skipped += 1;
619 return (skipped);
620 }
621
622 void
623 zfs_rmnode(znode_t *zp)
624 {
625 zfsvfs_t *zfsvfs = ZTOZSB(zp);
626 objset_t *os = zfsvfs->z_os;
627 znode_t *xzp = NULL;
628 dmu_tx_t *tx;
629 uint64_t acl_obj;
630 uint64_t xattr_obj;
631 uint64_t links;
632 int error;
633
634 ASSERT(ZTOI(zp)->i_nlink == 0);
635 ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
636
637 /*
638 * If this is an attribute directory, purge its contents.
639 */
640 if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
641 if (zfs_purgedir(zp) != 0) {
642 /*
643 * Not enough space to delete some xattrs.
644 * Leave it in the unlinked set.
645 */
646 zfs_znode_dmu_fini(zp);
647
648 return;
649 }
650 }
651
652 /*
653 * Free up all the data in the file. We don't do this for directories
654 * because we need truncate and remove to be in the same tx, like in
655 * zfs_znode_delete(). Otherwise, if we crash here we'll end up with
656 * an inconsistent truncated zap object in the delete queue. Note a
657 * truncated file is harmless since it only contains user data.
658 */
659 if (S_ISREG(ZTOI(zp)->i_mode)) {
660 error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
661 if (error) {
662 /*
663 * Not enough space or we were interrupted by unmount.
664 * Leave the file in the unlinked set.
665 */
666 zfs_znode_dmu_fini(zp);
667 return;
668 }
669 }
670
671 /*
672 * If the file has extended attributes, we're going to unlink
673 * the xattr dir.
674 */
675 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
676 &xattr_obj, sizeof (xattr_obj));
677 if (error == 0 && xattr_obj) {
678 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
679 ASSERT(error == 0);
680 }
681
682 acl_obj = zfs_external_acl(zp);
683
684 /*
685 * Set up the final transaction.
686 */
687 tx = dmu_tx_create(os);
688 dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
689 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
690 if (xzp) {
691 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
692 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
693 }
694 if (acl_obj)
695 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
696
697 zfs_sa_upgrade_txholds(tx, zp);
698 error = dmu_tx_assign(tx, TXG_WAIT);
699 if (error) {
700 /*
701 * Not enough space to delete the file. Leave it in the
702 * unlinked set, leaking it until the fs is remounted (at
703 * which point we'll call zfs_unlinked_drain() to process it).
704 */
705 dmu_tx_abort(tx);
706 zfs_znode_dmu_fini(zp);
707 goto out;
708 }
709
710 if (xzp) {
711 ASSERT(error == 0);
712 mutex_enter(&xzp->z_lock);
713 xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
714 clear_nlink(ZTOI(xzp)); /* no more links to it */
715 links = 0;
716 VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
717 &links, sizeof (links), tx));
718 mutex_exit(&xzp->z_lock);
719 zfs_unlinked_add(xzp, tx);
720 }
721
722 /* Remove this znode from the unlinked set */
723 VERIFY3U(0, ==,
724 zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
725
726 zfs_znode_delete(zp, tx);
727
728 dmu_tx_commit(tx);
729 out:
730 if (xzp)
731 zfs_iput_async(ZTOI(xzp));
732 }
733
734 static uint64_t
735 zfs_dirent(znode_t *zp, uint64_t mode)
736 {
737 uint64_t de = zp->z_id;
738
739 if (ZTOZSB(zp)->z_version >= ZPL_VERSION_DIRENT_TYPE)
740 de |= IFTODT(mode) << 60;
741 return (de);
742 }
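
/*
 * Worked example (illustrative, added for clarity): on a ZPL version that
 * records dirent types, a regular file (IFTODT() yields DT_REG == 8) stored
 * at object id 0x1234 is encoded as (8ULL << 60) | 0x1234, i.e.
 * 0x8000000000001234.  Readers such as zfs_match_find() then use
 * ZFS_DIRENT_OBJ() to strip the type bits and recover the plain object id.
 */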
743
744 /*
745 * Link zp into dl. Can only fail if zp has been unlinked.
746 */
747 int
748 zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
749 {
750 znode_t *dzp = dl->dl_dzp;
751 zfsvfs_t *zfsvfs = ZTOZSB(zp);
752 uint64_t value;
753 int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
754 sa_bulk_attr_t bulk[5];
755 uint64_t mtime[2], ctime[2];
756 uint64_t links;
757 int count = 0;
758 int error;
759
760 mutex_enter(&zp->z_lock);
761
762 if (!(flag & ZRENAMING)) {
763 if (zp->z_unlinked) { /* no new links to unlinked zp */
764 ASSERT(!(flag & (ZNEW | ZEXISTS)));
765 mutex_exit(&zp->z_lock);
766 return (SET_ERROR(ENOENT));
767 }
768 if (!(flag & ZNEW)) {
769 /*
770 * ZNEW nodes come from zfs_mknode() where the link
771 * count has already been initialised
772 */
773 inc_nlink(ZTOI(zp));
774 links = ZTOI(zp)->i_nlink;
775 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
776 NULL, &links, sizeof (links));
777 }
778 }
779 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
780 &dzp->z_id, sizeof (dzp->z_id));
781 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
782 &zp->z_pflags, sizeof (zp->z_pflags));
783
784 if (!(flag & ZNEW)) {
785 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
786 ctime, sizeof (ctime));
787 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
788 ctime);
789 }
790 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
791 ASSERT(error == 0);
792
793 mutex_exit(&zp->z_lock);
794
795 mutex_enter(&dzp->z_lock);
796 dzp->z_size++;
797 if (zp_is_dir)
798 inc_nlink(ZTOI(dzp));
799 links = ZTOI(dzp)->i_nlink;
800 count = 0;
801 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
802 &dzp->z_size, sizeof (dzp->z_size));
803 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
804 &links, sizeof (links));
805 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
806 mtime, sizeof (mtime));
807 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
808 ctime, sizeof (ctime));
809 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
810 &dzp->z_pflags, sizeof (dzp->z_pflags));
811 zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
812 error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
813 ASSERT(error == 0);
814 mutex_exit(&dzp->z_lock);
815
816 value = zfs_dirent(zp, zp->z_mode);
817 error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
818 8, 1, &value, tx);
819 ASSERT(error == 0);
820
821 return (0);
822 }
823
824 /*
825 * The match type in the code for this function should conform to:
826 *
827 * ------------------------------------------------------------------------
828 * fs type | z_norm | lookup type | match type
829 * ---------|-------------|-------------|----------------------------------
830 * CS !norm | 0 | 0 | 0 (exact)
831 * CS norm | formX | 0 | MT_NORMALIZE
832 * CI !norm | upper | !ZCIEXACT | MT_NORMALIZE
833 * CI !norm | upper | ZCIEXACT | MT_NORMALIZE | MT_MATCH_CASE
834 * CI norm | upper|formX | !ZCIEXACT | MT_NORMALIZE
835 * CI norm | upper|formX | ZCIEXACT | MT_NORMALIZE | MT_MATCH_CASE
836 * CM !norm | upper | !ZCILOOK | MT_NORMALIZE | MT_MATCH_CASE
837 * CM !norm | upper | ZCILOOK | MT_NORMALIZE
838 * CM norm | upper|formX | !ZCILOOK | MT_NORMALIZE | MT_MATCH_CASE
839 * CM norm | upper|formX | ZCILOOK | MT_NORMALIZE
840 *
841 * Abbreviations:
842 * CS = Case Sensitive, CI = Case Insensitive, CM = Case Mixed
843 * upper = case folding set by fs type on creation (U8_TEXTPREP_TOUPPER)
844 * formX = unicode normalization form set on fs creation
845 */
846 static int
847 zfs_dropname(zfs_dirlock_t *dl, znode_t *zp, znode_t *dzp, dmu_tx_t *tx,
848 int flag)
849 {
850 int error;
851
852 if (ZTOZSB(zp)->z_norm) {
853 matchtype_t mt = MT_NORMALIZE;
854
855 if ((ZTOZSB(zp)->z_case == ZFS_CASE_INSENSITIVE &&
856 (flag & ZCIEXACT)) ||
857 (ZTOZSB(zp)->z_case == ZFS_CASE_MIXED &&
858 !(flag & ZCILOOK))) {
859 mt |= MT_MATCH_CASE;
860 }
861
862 error = zap_remove_norm(ZTOZSB(zp)->z_os, dzp->z_id,
863 dl->dl_name, mt, tx);
864 } else {
865 error = zap_remove(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
866 tx);
867 }
868
869 return (error);
870 }
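
/*
 * Illustrative reading of the table above (added note): on a case-mixed,
 * normalizing file system ("CM norm"), a ZCILOOK request uses MT_NORMALIZE
 * alone, so any case or normalization variant of the stored name matches;
 * without ZCILOOK, MT_MATCH_CASE is added and only the exact stored case
 * is accepted.
 */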
871
872 /*
873 * Unlink zp from dl, and mark zp for deletion if this was the last link. Can
874 * fail if zp is a mount point (EBUSY) or a non-empty directory (ENOTEMPTY).
875 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
876 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
877 * and it's the caller's job to do it.
878 */
879 int
880 zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
881 boolean_t *unlinkedp)
882 {
883 znode_t *dzp = dl->dl_dzp;
884 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
885 int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
886 boolean_t unlinked = B_FALSE;
887 sa_bulk_attr_t bulk[5];
888 uint64_t mtime[2], ctime[2];
889 uint64_t links;
890 int count = 0;
891 int error;
892
893 #ifdef HAVE_DNLC
894 dnlc_remove(ZTOI(dzp), dl->dl_name);
895 #endif /* HAVE_DNLC */
896
897 if (!(flag & ZRENAMING)) {
898 mutex_enter(&zp->z_lock);
899
900 if (zp_is_dir && !zfs_dirempty(zp)) {
901 mutex_exit(&zp->z_lock);
902 return (SET_ERROR(ENOTEMPTY));
903 }
904
905 /*
906 * If we get here, we are going to try to remove the object.
907 * First try removing the name from the directory; if that
908 * fails, return the error.
909 */
910 error = zfs_dropname(dl, zp, dzp, tx, flag);
911 if (error != 0) {
912 mutex_exit(&zp->z_lock);
913 return (error);
914 }
915
916 if (ZTOI(zp)->i_nlink <= zp_is_dir) {
917 zfs_panic_recover("zfs: link count on %lu is %u, "
918 "should be at least %u", zp->z_id,
919 (int)ZTOI(zp)->i_nlink, zp_is_dir + 1);
920 set_nlink(ZTOI(zp), zp_is_dir + 1);
921 }
922 drop_nlink(ZTOI(zp));
923 if (ZTOI(zp)->i_nlink == zp_is_dir) {
924 zp->z_unlinked = B_TRUE;
925 clear_nlink(ZTOI(zp));
926 unlinked = B_TRUE;
927 } else {
928 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
929 NULL, &ctime, sizeof (ctime));
930 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
931 NULL, &zp->z_pflags, sizeof (zp->z_pflags));
932 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
933 ctime);
934 }
935 links = ZTOI(zp)->i_nlink;
936 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
937 NULL, &links, sizeof (links));
938 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
939 count = 0;
940 ASSERT(error == 0);
941 mutex_exit(&zp->z_lock);
942 } else {
943 error = zfs_dropname(dl, zp, dzp, tx, flag);
944 if (error != 0)
945 return (error);
946 }
947
948 mutex_enter(&dzp->z_lock);
949 dzp->z_size--; /* one dirent removed */
950 if (zp_is_dir)
951 drop_nlink(ZTOI(dzp)); /* ".." link from zp */
952 links = ZTOI(dzp)->i_nlink;
953 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
954 NULL, &links, sizeof (links));
955 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
956 NULL, &dzp->z_size, sizeof (dzp->z_size));
957 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
958 NULL, ctime, sizeof (ctime));
959 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
960 NULL, mtime, sizeof (mtime));
961 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
962 NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
963 zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
964 error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
965 ASSERT(error == 0);
966 mutex_exit(&dzp->z_lock);
967
968 if (unlinkedp != NULL)
969 *unlinkedp = unlinked;
970 else if (unlinked)
971 zfs_unlinked_add(zp, tx);
972
973 return (0);
974 }
975
976 /*
977 * Indicate whether the directory is empty. Works with or without z_lock
978 * held, but can only be considered a hint in the latter case. Returns true
979 * if only "." and ".." remain and there's no work in progress.
980 */
981 boolean_t
982 zfs_dirempty(znode_t *dzp)
983 {
984 return (dzp->z_size == 2 && dzp->z_dirlocks == 0);
985 }
986
987 int
988 zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
989 {
990 zfsvfs_t *zfsvfs = ZTOZSB(zp);
991 znode_t *xzp;
992 dmu_tx_t *tx;
993 int error;
994 zfs_acl_ids_t acl_ids;
995 boolean_t fuid_dirtied;
996 #ifdef DEBUG
997 uint64_t parent;
998 #endif
999
1000 *xipp = NULL;
1001
1002 if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
1003 return (error);
1004
1005 if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
1006 &acl_ids)) != 0)
1007 return (error);
1008 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1009 zfs_acl_ids_free(&acl_ids);
1010 return (SET_ERROR(EDQUOT));
1011 }
1012
1013 tx = dmu_tx_create(zfsvfs->z_os);
1014 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1015 ZFS_SA_BASE_ATTR_SIZE);
1016 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1017 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1018 fuid_dirtied = zfsvfs->z_fuid_dirty;
1019 if (fuid_dirtied)
1020 zfs_fuid_txhold(zfsvfs, tx);
1021 error = dmu_tx_assign(tx, TXG_WAIT);
1022 if (error) {
1023 zfs_acl_ids_free(&acl_ids);
1024 dmu_tx_abort(tx);
1025 return (error);
1026 }
1027 zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);
1028
1029 if (fuid_dirtied)
1030 zfs_fuid_sync(zfsvfs, tx);
1031
1032 #ifdef DEBUG
1033 error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
1034 &parent, sizeof (parent));
1035 ASSERT(error == 0 && parent == zp->z_id);
1036 #endif
1037
1038 VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
1039 sizeof (xzp->z_id), tx));
1040
1041 if (!zp->z_unlinked)
1042 (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
1043 xzp, "", NULL, acl_ids.z_fuidp, vap);
1044
1045 zfs_acl_ids_free(&acl_ids);
1046 dmu_tx_commit(tx);
1047
1048 *xipp = ZTOI(xzp);
1049
1050 return (0);
1051 }
1052
1053 /*
1054 * Return a znode for the extended attribute directory for zp.
1055 * ** If the directory does not already exist, it is created **
1056 *
1057 * IN: zp - znode to obtain attribute directory from
1058 * cr - credentials of caller
1059 * flags - flags from the VOP_LOOKUP call
1060 *
1061 * OUT: xipp - pointer to extended attribute znode
1062 *
1063 * RETURN: 0 on success
1064 * error number on failure
1065 */
1066 int
1067 zfs_get_xattrdir(znode_t *zp, struct inode **xipp, cred_t *cr, int flags)
1068 {
1069 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1070 znode_t *xzp;
1071 zfs_dirlock_t *dl;
1072 vattr_t va;
1073 int error;
1074 top:
1075 error = zfs_dirent_lock(&dl, zp, "", &xzp, ZXATTR, NULL, NULL);
1076 if (error)
1077 return (error);
1078
1079 if (xzp != NULL) {
1080 *xipp = ZTOI(xzp);
1081 zfs_dirent_unlock(dl);
1082 return (0);
1083 }
1084
1085 if (!(flags & CREATE_XATTR_DIR)) {
1086 zfs_dirent_unlock(dl);
1087 return (SET_ERROR(ENOENT));
1088 }
1089
1090 if (zfs_is_readonly(zfsvfs)) {
1091 zfs_dirent_unlock(dl);
1092 return (SET_ERROR(EROFS));
1093 }
1094
1095 /*
1096 * The ability to 'create' files in an attribute
1097 * directory comes from the write_xattr permission on the base file.
1098 *
1099 * The ability to 'search' an attribute directory requires
1100 * read_xattr permission on the base file.
1101 *
1102 * Once in a directory the ability to read/write attributes
1103 * is controlled by the permissions on the attribute file.
1104 */
1105 va.va_mask = ATTR_MODE | ATTR_UID | ATTR_GID;
1106 va.va_mode = S_IFDIR | S_ISVTX | 0777;
1107 zfs_fuid_map_ids(zp, cr, &va.va_uid, &va.va_gid);
1108
1109 va.va_dentry = NULL;
1110 error = zfs_make_xattrdir(zp, &va, xipp, cr);
1111 zfs_dirent_unlock(dl);
1112
1113 if (error == ERESTART) {
1114 /* NB: we already did dmu_tx_wait() if necessary */
1115 goto top;
1116 }
1117
1118 return (error);
1119 }
1120
1121 /*
1122 * Decide whether it is okay to remove within a sticky directory.
1123 *
1124 * In sticky directories, write access is not sufficient;
1125 * you can remove entries from a directory only if:
1126 *
1127 * you own the directory,
1128 * you own the entry,
1129 * the entry is a plain file and you have write access,
1130 * or you are privileged (checked in secpolicy...).
1131 *
1132 * The function returns 0 if remove access is granted.
1133 */
1134 int
1135 zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
1136 {
1137 uid_t uid;
1138 uid_t downer;
1139 uid_t fowner;
1140 zfsvfs_t *zfsvfs = ZTOZSB(zdp);
1141
1142 if (zfsvfs->z_replay)
1143 return (0);
1144
1145 if ((zdp->z_mode & S_ISVTX) == 0)
1146 return (0);
1147
1148 downer = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zdp)->i_uid),
1149 cr, ZFS_OWNER);
1150 fowner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zp)->i_uid),
1151 cr, ZFS_OWNER);
1152
1153 if ((uid = crgetuid(cr)) == downer || uid == fowner ||
1154 (S_ISDIR(ZTOI(zp)->i_mode) &&
1155 zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr) == 0))
1156 return (0);
1157 else
1158 return (secpolicy_vnode_remove(cr));
1159 }