// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_acl.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_trans_space.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iomap.h>
#include <linux/slab.h>
#include <linux/iversion.h>

/*
 * Directories have different lock order w.r.t. mmap_sem compared to regular
 * files. This is due to readdir potentially triggering page faults on a user
 * buffer inside filldir(), and this happens with the ilock on the directory
 * held. For regular files, the lock order is the other way around - the
 * mmap_sem is taken during the page fault, and then we lock the ilock to do
 * block mapping. Hence we need a different class for the directory ilock so
 * that lockdep can tell them apart.
 */
static struct lock_class_key xfs_nondir_ilock_class;
static struct lock_class_key xfs_dir_ilock_class;
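/*
 * Callback for security_inode_init_security(): store each LSM-supplied
 * security xattr on the new inode via xfs_attr_set().
 */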
static int
xfs_initxattrs(
	struct inode *inode,
	const struct xattr *xattr_array,
	void *fs_info)
{
	const struct xattr *xattr;
	struct xfs_inode *ip = XFS_I(inode);
	int error = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		error = xfs_attr_set(ip, xattr->name, xattr->value,
				     xattr->value_len, ATTR_SECURE);
		if (error < 0)
			break;
	}
	return error;
}

/*
 * Hook in SELinux.  This is not quite correct yet; what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */
STATIC int
xfs_init_security(
	struct inode *inode,
	struct inode *dir,
	const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &xfs_initxattrs, NULL);
}
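/*
 * Convert a VFS dentry into an xfs_name.  The plain variant leaves the file
 * type unknown; the _mode variant also encodes the file type and rejects
 * modes that do not map to a valid on-disk ftype.
 */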
static void
xfs_dentry_to_name(
	struct xfs_name *namep,
	struct dentry *dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = XFS_DIR3_FT_UNKNOWN;
}

static int
xfs_dentry_mode_to_name(
	struct xfs_name *namep,
	struct dentry *dentry,
	int mode)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = xfs_mode_to_ftype(mode);

	if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
		return -EFSCORRUPTED;

	return 0;
}

STATIC void
xfs_cleanup_inode(
	struct inode *dir,
	struct inode *inode,
	struct dentry *dentry)
{
	struct xfs_name teardown;

	/*
	 * Oh, the horror.
	 * If we can't add the ACL or we fail in xfs_init_security we must
	 * back out.  ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}
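/*
 * Common code for creating a new inode: regular files, directories, special
 * files (mknod) and O_TMPFILE inodes all funnel through here.
 */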
STATIC int
xfs_generic_create(
	struct inode *dir,
	struct dentry *dentry,
	umode_t mode,
	dev_t rdev,
	bool tmpfile)	/* unnamed file */
{
	struct inode *inode;
	struct xfs_inode *ip = NULL;
	struct posix_acl *default_acl, *acl;
	struct xfs_name name;
	int error;

	/*
	 * Irix uses Missed'em'V split, but doesn't want to see
	 * the upper 5 bits of (14bit) major.
	 */
	if (S_ISCHR(mode) || S_ISBLK(mode)) {
		if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
			return -EINVAL;
	} else {
		rdev = 0;
	}

	error = posix_acl_create(dir, &mode, &default_acl, &acl);
	if (error)
		return error;

	/* Verify mode is valid also for tmpfile case */
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out_free_acl;

	if (!tmpfile) {
		error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
	} else {
		error = xfs_create_tmpfile(XFS_I(dir), mode, &ip);
	}
	if (unlikely(error))
		goto out_free_acl;

	inode = VFS_I(ip);

	error = xfs_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

#ifdef CONFIG_XFS_POSIX_ACL
	if (default_acl) {
		error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
		if (error)
			goto out_cleanup_inode;
	}
	if (acl) {
		error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
		if (error)
			goto out_cleanup_inode;
	}
#endif

	xfs_setup_iops(ip);

	if (tmpfile)
		d_tmpfile(dentry, inode);
	else
		d_instantiate(dentry, inode);

	xfs_finish_inode_setup(ip);

 out_free_acl:
	if (default_acl)
		posix_acl_release(default_acl);
	if (acl)
		posix_acl_release(acl);
	return error;

 out_cleanup_inode:
	xfs_finish_inode_setup(ip);
	if (!tmpfile)
		xfs_cleanup_inode(dir, inode, dentry);
	iput(inode);
	goto out_free_acl;
}

STATIC int
xfs_vn_mknod(
	struct inode *dir,
	struct dentry *dentry,
	umode_t mode,
	dev_t rdev)
{
	return xfs_generic_create(dir, dentry, mode, rdev, false);
}

STATIC int
xfs_vn_create(
	struct inode *dir,
	struct dentry *dentry,
	umode_t mode,
	bool flags)
{
	return xfs_vn_mknod(dir, dentry, mode, 0);
}

STATIC int
xfs_vn_mkdir(
	struct inode *dir,
	struct dentry *dentry,
	umode_t mode)
{
	return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
}
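/* Standard (case-sensitive) directory lookup. */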
STATIC struct dentry *
xfs_vn_lookup(
	struct inode *dir,
	struct dentry *dentry,
	unsigned int flags)
{
	struct xfs_inode *cip;
	struct xfs_name name;
	int error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&name, dentry);
	error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
	if (unlikely(error)) {
		if (unlikely(error != -ENOENT))
			return ERR_PTR(error);
		d_add(dentry, NULL);
		return NULL;
	}

	return d_splice_alias(VFS_I(cip), dentry);
}
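/*
 * Lookup for case-insensitive directories: on a hit with mismatching case the
 * dentry is installed under the on-disk name via d_add_ci().
 */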
STATIC struct dentry *
xfs_vn_ci_lookup(
	struct inode *dir,
	struct dentry *dentry,
	unsigned int flags)
{
	struct xfs_inode *ip;
	struct xfs_name xname;
	struct xfs_name ci_name;
	struct qstr dname;
	int error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&xname, dentry);
	error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
	if (unlikely(error)) {
		if (unlikely(error != -ENOENT))
			return ERR_PTR(error);
		/*
		 * call d_add(dentry, NULL) here when d_drop_negative_children
		 * is called in xfs_vn_mknod (ie. allow negative dentries
		 * with CI filesystems).
		 */
		return NULL;
	}

	/* if exact match, just splice and exit */
	if (!ci_name.name)
		return d_splice_alias(VFS_I(ip), dentry);

	/* else case-insensitive match... */
	dname.name = ci_name.name;
	dname.len = ci_name.len;
	dentry = d_add_ci(dentry, VFS_I(ip), &dname);
	kmem_free(ci_name.name);
	return dentry;
}

STATIC int
xfs_vn_link(
	struct dentry *old_dentry,
	struct inode *dir,
	struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	struct xfs_name name;
	int error;

	error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
	if (unlikely(error))
		return error;

	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error))
		return error;

	ihold(inode);
	d_instantiate(dentry, inode);
	return 0;
}
STATIC int
xfs_vn_unlink(
	struct inode *dir,
	struct dentry *dentry)
{
	struct xfs_name name;
	int error;

	xfs_dentry_to_name(&name, dentry);

	error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
	if (error)
		return error;

	/*
	 * With unlink, the VFS makes the dentry "negative": no inode,
	 * but still hashed. This is incompatible with case-insensitive
	 * mode, so invalidate (unhash) the dentry in CI-mode.
	 */
	if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb))
		d_invalidate(dentry);
	return 0;
}
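/* Create a symbolic link and attach it to the dentry. */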
STATIC int
xfs_vn_symlink(
	struct inode *dir,
	struct dentry *dentry,
	const char *symname)
{
	struct inode *inode;
	struct xfs_inode *cip = NULL;
	struct xfs_name name;
	int error;
	umode_t mode;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out;

	error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
	if (unlikely(error))
		goto out;

	inode = VFS_I(cip);

	error = xfs_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	xfs_setup_iops(cip);

	d_instantiate(dentry, inode);
	xfs_finish_inode_setup(cip);
	return 0;

 out_cleanup_inode:
	xfs_finish_inode_setup(cip);
	xfs_cleanup_inode(dir, inode, dentry);
	iput(inode);
 out:
	return error;
}
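/*
 * Rename, with RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT
 * support; any other flag is rejected with -EINVAL.
 */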
STATIC int
xfs_vn_rename(
	struct inode *odir,
	struct dentry *odentry,
	struct inode *ndir,
	struct dentry *ndentry,
	unsigned int flags)
{
	struct inode *new_inode = d_inode(ndentry);
	int omode = 0;
	int error;
	struct xfs_name oname;
	struct xfs_name nname;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	/* if we are exchanging files, we need to set i_mode of both files */
	if (flags & RENAME_EXCHANGE)
		omode = d_inode(ndentry)->i_mode;

	error = xfs_dentry_mode_to_name(&oname, odentry, omode);
	if (omode && unlikely(error))
		return error;

	error = xfs_dentry_mode_to_name(&nname, ndentry,
					d_inode(odentry)->i_mode);
	if (unlikely(error))
		return error;

	return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
			  XFS_I(ndir), &nname,
			  new_inode ? XFS_I(new_inode) : NULL, flags);
}
/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * The link buffer is kmalloced for this reason...
 */
STATIC const char *
xfs_vn_get_link(
	struct dentry *dentry,
	struct inode *inode,
	struct delayed_call *done)
{
	char *link;
	int error = -ENOMEM;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	link = kmalloc(XFS_SYMLINK_MAXLEN+1, GFP_KERNEL);
	if (!link)
		goto out_err;

	error = xfs_readlink(XFS_I(d_inode(dentry)), link);
	if (unlikely(error))
		goto out_kfree;

	set_delayed_call(done, kfree_link, link);
	return link;

 out_kfree:
	kfree(link);
 out_err:
	return ERR_PTR(error);
}

STATIC const char *
xfs_vn_get_link_inline(
	struct dentry *dentry,
	struct inode *inode,
	struct delayed_call *done)
{
	ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
	return XFS_I(inode)->i_df.if_u1.if_data;
}
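/* Fill in a kstat structure from the XFS incore inode. */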
STATIC int
xfs_vn_getattr(
	const struct path *path,
	struct kstat *stat,
	u32 request_mask,
	unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;

	trace_xfs_getattr(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	stat->size = XFS_ISIZE(ip);
	stat->dev = inode->i_sb->s_dev;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->ino = ip->i_ino;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);

	if (ip->i_d.di_version == 3) {
		if (request_mask & STATX_BTIME) {
			stat->result_mask |= STATX_BTIME;
			stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
			stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
		}
	}

	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		stat->blksize = BLKDEV_IOSIZE;
		stat->rdev = inode->i_rdev;
		break;
	default:
		if (XFS_IS_REALTIME_INODE(ip)) {
			/*
			 * If the file blocks are being allocated from a
			 * realtime volume, then return the inode's realtime
			 * extent size or the realtime volume's extent size.
			 */
			stat->blksize =
				xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
		} else
			stat->blksize = xfs_preferred_iosize(mp);
		stat->rdev = 0;
		break;
	}

	return 0;
}
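/*
 * Helpers to copy mode and timestamp changes from an iattr into the VFS
 * inode; both expect the ILOCK to be held in exclusive mode.
 */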
static void
xfs_setattr_mode(
	struct xfs_inode *ip,
	struct iattr *iattr)
{
	struct inode *inode = VFS_I(ip);
	umode_t mode = iattr->ia_mode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	inode->i_mode &= S_IFMT;
	inode->i_mode |= mode & ~S_IFMT;
}

void
xfs_setattr_time(
	struct xfs_inode *ip,
	struct iattr *iattr)
{
	struct inode *inode = VFS_I(ip);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (iattr->ia_valid & ATTR_ATIME)
		inode->i_atime = iattr->ia_atime;
	if (iattr->ia_valid & ATTR_CTIME)
		inode->i_ctime = iattr->ia_ctime;
	if (iattr->ia_valid & ATTR_MTIME)
		inode->i_mtime = iattr->ia_mtime;
}
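/*
 * Check that a proposed attribute change is allowed: reject changes on
 * read-only or shut-down filesystems before doing the generic checks in
 * setattr_prepare().
 */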
static int
xfs_vn_change_ok(
	struct dentry *dentry,
	struct iattr *iattr)
{
	struct xfs_mount *mp = XFS_I(d_inode(dentry))->i_mount;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return -EROFS;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	return setattr_prepare(dentry, iattr);
}
/*
 * Set non-size attributes of an inode.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
int
xfs_setattr_nonsize(
	struct xfs_inode *ip,
	struct iattr *iattr,
	int flags)
{
	xfs_mount_t *mp = ip->i_mount;
	struct inode *inode = VFS_I(ip);
	int mask = iattr->ia_valid;
	xfs_trans_t *tp;
	int error;
	kuid_t uid = GLOBAL_ROOT_UID, iuid = GLOBAL_ROOT_UID;
	kgid_t gid = GLOBAL_ROOT_GID, igid = GLOBAL_ROOT_GID;
	struct xfs_dquot *udqp = NULL, *gdqp = NULL;
	struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL;

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = iattr->ia_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = inode->i_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = iattr->ia_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = inode->i_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
					   xfs_kgid_to_gid(gid),
					   xfs_get_projid(ip),
					   qflags, &udqp, &gdqp, NULL);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_dqrele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, inode's dquot(s)
		 * would have changed also.
		 */
		iuid = inode->i_uid;
		igid = inode->i_gid;
		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

		/*
		 * Do a quota reservation only if uid/gid is actually
		 * going to change.
		 */
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    ((XFS_IS_UQUOTA_ON(mp) && !uid_eq(iuid, uid)) ||
		     (XFS_IS_GQUOTA_ON(mp) && !gid_eq(igid, gid)))) {
			ASSERT(tp);
			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						NULL, capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (error)	/* out of quota */
				goto out_cancel;
		}
	}

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((inode->i_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			inode->i_mode &= ~(S_ISUID|S_ISGID);

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (!uid_eq(iuid, uid)) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
				ASSERT(mask & ATTR_UID);
				ASSERT(udqp);
				olddquot1 = xfs_qm_vop_chown(tp, ip,
							&ip->i_udquot, udqp);
			}
			ip->i_d.di_uid = xfs_kuid_to_uid(uid);
			inode->i_uid = uid;
		}
		if (!gid_eq(igid, gid)) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
				ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
				       !XFS_IS_PQUOTA_ON(mp));
				ASSERT(mask & ATTR_GID);
				ASSERT(gdqp);
				olddquot2 = xfs_qm_vop_chown(tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_gid = xfs_kgid_to_gid(gid);
			inode->i_gid = gid;
		}
	}

	if (mask & ATTR_MODE)
		xfs_setattr_mode(ip, iattr);
	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
		xfs_setattr_time(ip, iattr);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot1);
	xfs_qm_dqrele(olddquot2);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return error;

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 * update.  We could avoid this with linked transactions and passing
	 * down the transaction pointer all the way to attr_set.  No previous
	 * user of the generic Posix ACL code seems to care about this issue
	 * either.
	 */
	if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
		error = posix_acl_chmod(inode, inode->i_mode);
		if (error)
			return error;
	}

	return 0;

out_cancel:
	xfs_trans_cancel(tp);
out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}
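/* VFS-facing wrapper: validate the change, then apply non-size attributes. */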
int
xfs_vn_setattr_nonsize(
	struct dentry *dentry,
	struct iattr *iattr)
{
	struct xfs_inode *ip = XFS_I(d_inode(dentry));
	int error;

	trace_xfs_setattr(ip);

	error = xfs_vn_change_ok(dentry, iattr);
	if (error)
		return error;
	return xfs_setattr_nonsize(ip, iattr, 0);
}
/*
 * Truncate file.  Must have write permission and not be a directory.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
STATIC int
xfs_setattr_size(
	struct xfs_inode *ip,
	struct iattr *iattr)
{
	struct xfs_mount *mp = ip->i_mount;
	struct inode *inode = VFS_I(ip);
	xfs_off_t oldsize, newsize;
	struct xfs_trans *tp;
	int error;
	uint lock_flags = 0;
	bool did_zeroing = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
		ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);

	oldsize = inode->i_size;
	newsize = iattr->ia_size;

	/*
	 * Short circuit the truncate case for zero length files.
	 */
	if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
		if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
			return 0;

		/*
		 * Use the regular setattr path to update the timestamps.
		 */
		iattr->ia_valid &= ~ATTR_SIZE;
		return xfs_setattr_nonsize(ip, iattr, 0);
	}

	/*
	 * Make sure that the dquots are attached to the inode.
	 */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * Wait for all direct I/O to complete.
	 */
	inode_dio_wait(inode);

	/*
	 * File data changes must be complete before we start the transaction to
	 * modify the inode. This needs to be done before joining the inode to
	 * the transaction because the inode cannot be unlocked once it is a
	 * part of the transaction.
	 *
	 * Start with zeroing any data beyond EOF that we may expose on file
	 * extension, or zeroing out the rest of the block on a downward
	 * truncate.
	 */
	if (newsize > oldsize) {
		trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
				&did_zeroing, &xfs_iomap_ops);
	} else {
		error = iomap_truncate_page(inode, newsize, &did_zeroing,
				&xfs_iomap_ops);
	}

	if (error)
		return error;

	/*
	 * We've already locked out new page faults, so now we can safely remove
	 * pages from the page cache knowing they won't get refaulted until we
	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
	 * complete. The truncate_setsize() call also cleans partial EOF page
	 * PTEs on extending truncates and hence ensures sub-page block size
	 * filesystems are correctly handled, too.
	 *
	 * We have to do all the page cache truncate work outside the
	 * transaction context as the "lock" order is page lock->log space
	 * reservation as defined by extent allocation in the writeback path.
	 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
	 * having already truncated the in-memory version of the file (i.e. made
	 * user visible changes). There's not much we can do about this, except
	 * to hope that the caller sees ENOMEM and retries the truncate
	 * operation.
	 *
	 * And we update in-core i_size and truncate page cache beyond newsize
	 * before writing back the [di_size, newsize] range, so we're guaranteed
	 * not to write stale data past the new EOF on truncate down.
	 */
	truncate_setsize(inode, newsize);

	/*
	 * We are going to log the inode size change in this transaction so
	 * any previous writes that are beyond the on disk EOF and the new
	 * EOF that have not been written out need to be written here.  If we
	 * do not write the data out, we expose ourselves to the null files
	 * problem. Note that this includes any block zeroing we did above;
	 * otherwise those blocks may not be zeroed after a crash.
	 */
	if (did_zeroing ||
	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						ip->i_d.di_size, newsize - 1);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	lock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS sets these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize &&
	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_time(inode);
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
	}

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk. This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	ip->i_d.di_size = newsize;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (newsize <= oldsize) {
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
		if (error)
			goto out_trans_cancel;

		/*
		 * Truncated "down", so we're removing references to old data
		 * here - if we delay flushing for a long time, we expose
		 * ourselves unduly to the notorious NULL files problem.  So,
		 * we mark this inode and flush it when the file is closed,
		 * and do not wait the usual (long) time for writeout.
		 */
		xfs_iflags_set(ip, XFS_ITRUNCATED);

		/* A truncate down always removes post-EOF blocks. */
		xfs_inode_clear_eofblocks_tag(ip);
	}

	if (iattr->ia_valid & ATTR_MODE)
		xfs_setattr_mode(ip, iattr);
	if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
		xfs_setattr_time(ip, iattr);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
int
xfs_vn_setattr_size(
	struct dentry *dentry,
	struct iattr *iattr)
{
	struct xfs_inode *ip = XFS_I(d_inode(dentry));
	int error;

	trace_xfs_setattr(ip);

	error = xfs_vn_change_ok(dentry, iattr);
	if (error)
		return error;
	return xfs_setattr_size(ip, iattr);
}
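/*
 * Dispatch ->setattr: size changes must break pNFS layouts and take the
 * MMAPLOCK before truncating; everything else goes through the non-size path.
 */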
STATIC int
xfs_vn_setattr(
	struct dentry *dentry,
	struct iattr *iattr)
{
	int error;

	if (iattr->ia_valid & ATTR_SIZE) {
		struct xfs_inode *ip = XFS_I(d_inode(dentry));
		uint iolock = XFS_IOLOCK_EXCL;

		error = xfs_break_layouts(d_inode(dentry), &iolock);
		if (error)
			return error;

		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
		error = xfs_vn_setattr_size(dentry, iattr);
		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	} else {
		error = xfs_vn_setattr_nonsize(dentry, iattr);
	}

	return error;
}
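/*
 * Timestamp updates: with lazytime, pure timestamp changes stay in core via
 * generic_update_time(); otherwise they are logged through a transaction.
 */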
STATIC int
xfs_vn_update_time(
	struct inode *inode,
	struct timespec *now,
	int flags)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	int log_flags = XFS_ILOG_TIMESTAMP;
	struct xfs_trans *tp;
	int error;

	trace_xfs_update_time(ip);

	if (inode->i_sb->s_flags & SB_LAZYTIME) {
		if (!((flags & S_VERSION) &&
		      inode_maybe_inc_iversion(inode, false)))
			return generic_update_time(inode, now, flags);

		/* Capture the iversion update that just occurred */
		log_flags |= XFS_ILOG_CORE;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, log_flags);
	return xfs_trans_commit(tp);
}
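/* FIEMAP support, backed by the iomap infrastructure for both forks. */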
STATIC int
xfs_vn_fiemap(
	struct inode *inode,
	struct fiemap_extent_info *fieinfo,
	u64 start,
	u64 length)
{
	int error;

	xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_xattr_iomap_ops);
	} else {
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_iomap_ops);
	}
	xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);

	return error;
}
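/* Create an unnamed (O_TMPFILE) inode. */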
STATIC int
xfs_vn_tmpfile(
	struct inode *dir,
	struct dentry *dentry,
	umode_t mode)
{
	return xfs_generic_create(dir, dentry, mode, 0, true);
}

static const struct inode_operations xfs_inode_operations = {
	.get_acl = xfs_get_acl,
	.set_acl = xfs_set_acl,
	.getattr = xfs_vn_getattr,
	.setattr = xfs_vn_setattr,
	.listxattr = xfs_vn_listxattr,
	.fiemap = xfs_vn_fiemap,
	.update_time = xfs_vn_update_time,
};

static const struct inode_operations xfs_dir_inode_operations = {
	.create = xfs_vn_create,
	.lookup = xfs_vn_lookup,
	.link = xfs_vn_link,
	.unlink = xfs_vn_unlink,
	.symlink = xfs_vn_symlink,
	.mkdir = xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir = xfs_vn_unlink,
	.mknod = xfs_vn_mknod,
	.rename = xfs_vn_rename,
	.get_acl = xfs_get_acl,
	.set_acl = xfs_set_acl,
	.getattr = xfs_vn_getattr,
	.setattr = xfs_vn_setattr,
	.listxattr = xfs_vn_listxattr,
	.update_time = xfs_vn_update_time,
	.tmpfile = xfs_vn_tmpfile,
};

static const struct inode_operations xfs_dir_ci_inode_operations = {
	.create = xfs_vn_create,
	.lookup = xfs_vn_ci_lookup,
	.link = xfs_vn_link,
	.unlink = xfs_vn_unlink,
	.symlink = xfs_vn_symlink,
	.mkdir = xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir = xfs_vn_unlink,
	.mknod = xfs_vn_mknod,
	.rename = xfs_vn_rename,
	.get_acl = xfs_get_acl,
	.set_acl = xfs_set_acl,
	.getattr = xfs_vn_getattr,
	.setattr = xfs_vn_setattr,
	.listxattr = xfs_vn_listxattr,
	.update_time = xfs_vn_update_time,
	.tmpfile = xfs_vn_tmpfile,
};

static const struct inode_operations xfs_symlink_inode_operations = {
	.get_link = xfs_vn_get_link,
	.getattr = xfs_vn_getattr,
	.setattr = xfs_vn_setattr,
	.listxattr = xfs_vn_listxattr,
	.update_time = xfs_vn_update_time,
};

static const struct inode_operations xfs_inline_symlink_inode_operations = {
	.get_link = xfs_vn_get_link_inline,
	.getattr = xfs_vn_getattr,
	.setattr = xfs_vn_setattr,
	.listxattr = xfs_vn_listxattr,
	.update_time = xfs_vn_update_time,
};
/* Figure out if this file actually supports DAX. */
static bool
xfs_inode_supports_dax(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;

	/* Only supported on non-reflinked files. */
	if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip))
		return false;

	/* DAX mount option or DAX iflag must be set. */
	if (!(mp->m_flags & XFS_MOUNT_DAX) &&
	    !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX))
		return false;

	/* Block size must match page size */
	if (mp->m_sb.sb_blocksize != PAGE_SIZE)
		return false;

	/* Device has to support DAX too. */
	return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL;
}
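/* Propagate on-disk inode flags (and DAX eligibility) to the VFS inode flags. */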
STATIC void
xfs_diflags_to_iflags(
	struct inode *inode,
	struct xfs_inode *ip)
{
	uint16_t flags = ip->i_d.di_flags;

	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC |
			    S_NOATIME | S_DAX);

	if (flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	if (flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	if (flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	if (xfs_inode_supports_dax(ip))
		inode->i_flags |= S_DAX;
}
/*
 * Initialize the Linux inode.
 *
 * When reading existing inodes from disk this is called directly from
 * xfs_iget; when creating a new inode it is called from xfs_ialloc after
 * setting up the inode. These callers have different criteria for clearing
 * XFS_INEW, so leave it up to the caller to deal with unlocking the inode
 * appropriately.
 */
void
xfs_setup_inode(
	struct xfs_inode *ip)
{
	struct inode *inode = &ip->i_vnode;
	gfp_t gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
	inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);

	i_size_write(inode, ip->i_d.di_size);
	xfs_diflags_to_iflags(inode, ip);

	if (S_ISDIR(inode->i_mode)) {
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
	} else {
		ip->d_ops = ip->i_mount->m_nondir_inode_ops;
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}
}
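/*
 * Pick the inode/file/address-space operations for this inode based on its
 * type and, for symlinks, whether the target is stored inline.
 */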
void
xfs_setup_iops(
	struct xfs_inode *ip)
{
	struct inode *inode = &ip->i_vnode;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		if (IS_DAX(inode))
			inode->i_mapping->a_ops = &xfs_dax_aops;
		else
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		if (ip->i_df.if_flags & XFS_IFINLINE)
			inode->i_op = &xfs_inline_symlink_inode_operations;
		else
			inode->i_op = &xfs_symlink_inode_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}