]> git.proxmox.com Git - mirror_zfs-debian.git/blame - include/linux/vfs_compat.h
Merge branch 'add_breaks_replaces_zfs_initramfs' into 'master'
[mirror_zfs-debian.git] / include / linux / vfs_compat.h
CommitLineData
7268e1be
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
ea04106b 24 * Copyright (C) 2015 Jörg Thalheim.
7268e1be
BB
25 */
26
27#ifndef _ZFS_VFS_H
a08ee875 28#define _ZFS_VFS_H
7268e1be 29
ea04106b 30#include <sys/taskq.h>
cae5b340 31#include <sys/cred.h>
e10b0808 32#include <linux/backing-dev.h>
ea04106b 33
bdf4328b
BB
/*
 * 2.6.28 API change,
 * Added insert_inode_locked() helper function.  Prior to this most
 * callers used insert_inode_hash().  The older method does not check
 * for collisions in the inode_hashtable, but it is still acceptable
 * for use here.
 */
#ifndef HAVE_INSERT_INODE_LOCKED
static inline int
insert_inode_locked(struct inode *ip)
{
	insert_inode_hash(ip);
	return (0);
}
#endif /* HAVE_INSERT_INODE_LOCKED */
7268e1be 48
b3129792
PJ
49/*
50 * 2.6.35 API change,
51 * Add truncate_setsize() if it is not exported by the Linux kernel.
52 *
53 * Truncate the inode and pages associated with the inode. The pages are
54 * unmapped and removed from cache.
55 */
56#ifndef HAVE_TRUNCATE_SETSIZE
57static inline void
58truncate_setsize(struct inode *ip, loff_t new)
59{
60 struct address_space *mapping = ip->i_mapping;
61
62 i_size_write(ip, new);
63
64 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
65 truncate_inode_pages(mapping, new);
66 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
67}
68#endif /* HAVE_TRUNCATE_SETSIZE */
69
76659dc1 70/*
ea04106b
AX
71 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
72 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
22929307
AX
73 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
74 * 4.12 - x.y, super_setup_bdi_name() new interface.
76659dc1 75 */
22929307
AX
76#if defined(HAVE_SUPER_SETUP_BDI_NAME)
77extern atomic_long_t zfs_bdi_seq;
78
79static inline int
80zpl_bdi_setup(struct super_block *sb, char *name)
81{
82 return super_setup_bdi_name(sb, "%.28s-%ld", name,
83 atomic_long_inc_return(&zfs_bdi_seq));
84}
85static inline void
86zpl_bdi_destroy(struct super_block *sb)
87{
88}
89#elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
ea04106b 90static inline int
22929307
AX
91zpl_bdi_setup(struct super_block *sb, char *name)
92{
93 struct backing_dev_info *bdi;
94 int error;
95
96 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
97 error = bdi_setup_and_register(bdi, name);
98 if (error) {
99 kmem_free(bdi, sizeof (struct backing_dev_info));
100 return (error);
101 }
102
103 sb->s_bdi = bdi;
104
105 return (0);
106}
107static inline void
108zpl_bdi_destroy(struct super_block *sb)
ea04106b 109{
22929307
AX
110 struct backing_dev_info *bdi = sb->s_bdi;
111
112 bdi_destroy(bdi);
113 kmem_free(bdi, sizeof (struct backing_dev_info));
114 sb->s_bdi = NULL;
ea04106b
AX
115}
116#elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
117static inline int
22929307
AX
118zpl_bdi_setup(struct super_block *sb, char *name)
119{
120 struct backing_dev_info *bdi;
121 int error;
122
123 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
124 error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
125 if (error) {
126 kmem_free(sb->s_bdi, sizeof (struct backing_dev_info));
127 return (error);
128 }
129
130 sb->s_bdi = bdi;
131
132 return (0);
133}
134static inline void
135zpl_bdi_destroy(struct super_block *sb)
ea04106b 136{
22929307
AX
137 struct backing_dev_info *bdi = sb->s_bdi;
138
139 bdi_destroy(bdi);
140 kmem_free(bdi, sizeof (struct backing_dev_info));
141 sb->s_bdi = NULL;
ea04106b
AX
142}
143#else
5547c2f1
BB
144extern atomic_long_t zfs_bdi_seq;
145
146static inline int
22929307 147zpl_bdi_setup(struct super_block *sb, char *name)
5547c2f1 148{
22929307 149 struct backing_dev_info *bdi;
5547c2f1
BB
150 int error;
151
22929307 152 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
5547c2f1 153 bdi->name = name;
ea04106b
AX
154 bdi->capabilities = BDI_CAP_MAP_COPY;
155
5547c2f1 156 error = bdi_init(bdi);
22929307
AX
157 if (error) {
158 kmem_free(bdi, sizeof (struct backing_dev_info));
5547c2f1 159 return (error);
22929307 160 }
5547c2f1 161
22929307 162 error = bdi_register(bdi, NULL, "%.28s-%ld", name,
5547c2f1
BB
163 atomic_long_inc_return(&zfs_bdi_seq));
164 if (error) {
165 bdi_destroy(bdi);
22929307 166 kmem_free(bdi, sizeof (struct backing_dev_info));
5547c2f1
BB
167 return (error);
168 }
169
22929307
AX
170 sb->s_bdi = bdi;
171
172 return (0);
173}
174static inline void
175zpl_bdi_destroy(struct super_block *sb)
176{
177 struct backing_dev_info *bdi = sb->s_bdi;
178
179 bdi_destroy(bdi);
180 kmem_free(bdi, sizeof (struct backing_dev_info));
181 sb->s_bdi = NULL;
5547c2f1 182}
ea04106b 183#endif
76659dc1 184
42f7b73b
AX
/*
 * 4.14 API change,
 * The kernel added SB_* super block flag definitions; map each one to
 * its MS_* equivalent when building against older kernels.
 */
#ifndef SB_RDONLY
#define SB_RDONLY	MS_RDONLY
#endif

#ifndef SB_SILENT
#define SB_SILENT	MS_SILENT
#endif

#ifndef SB_ACTIVE
#define SB_ACTIVE	MS_ACTIVE
#endif

#ifndef SB_POSIXACL
#define SB_POSIXACL	MS_POSIXACL
#endif

#ifndef SB_MANDLOCK
#define SB_MANDLOCK	MS_MANDLOCK
#endif
7b3e34ba
BB
/*
 * 2.6.38 API change,
 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk
 * cases; define it as a no-op mask on kernels that predate it.
 */
#ifndef LOOKUP_RCU
#define LOOKUP_RCU	0x0
#endif /* LOOKUP_RCU */
216
28eb9213
DH
217/*
218 * 3.2-rc1 API change,
219 * Add set_nlink() if it is not exported by the Linux kernel.
220 *
221 * i_nlink is read-only in Linux 3.2, but it can be set directly in
222 * earlier kernels.
223 */
224#ifndef HAVE_SET_NLINK
225static inline void
226set_nlink(struct inode *inode, unsigned int nlink)
227{
228 inode->i_nlink = nlink;
229}
230#endif /* HAVE_SET_NLINK */
231
b39d3b9f
BB
/*
 * 3.3 API change,
 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
 * umode_t rather than an int.  zpl_umode_t resolves to whichever type
 * the running kernel expects so callers can use a single signature.
 */
#ifdef HAVE_MKDIR_UMODE_T
typedef umode_t zpl_umode_t;
#else
typedef int zpl_umode_t;
#endif
243
739a1a82
RY
/*
 * 3.5 API change,
 * The clear_inode() function replaces end_writeback() and introduces
 * an ordering change regarding when the inode_sync_wait() occurs.
 * See the configure check in config/kernel-clear-inode.m4 for full
 * details.
 */
#if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
#define clear_inode(ip)	end_writeback(ip)
#endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */
253
3c203610
YS
/*
 * 3.6 API change,
 * The sget() helper function now takes the mount flags as an
 * argument; zpl_sget() papers over the difference.
 */
#ifdef HAVE_5ARG_SGET
#define zpl_sget(type, cmp, set, fl, mtd)	sget(type, cmp, set, fl, mtd)
#else
#define zpl_sget(type, cmp, set, fl, mtd)	sget(type, cmp, set, mtd)
#endif /* HAVE_5ARG_SGET */
263
c06d4368
AX
#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
/*
 * Fallback for kernels without an exported lseek_execute():
 * validate the requested offset and commit it to the file position.
 */
static inline loff_t
lseek_execute(struct file *filp, struct inode *inode,
    loff_t offset, loff_t maxsize)
{
	/*
	 * Reject negative offsets (unless unsigned offsets are
	 * allowed for this file) and offsets past the maximum.
	 */
	if ((offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET)) ||
	    (offset > maxsize))
		return (-EINVAL);

	if (offset != filp->f_pos) {
		spin_lock(&filp->f_lock);
		filp->f_pos = offset;
		filp->f_version = 0;
		spin_unlock(&filp->f_lock);
	}

	return (offset);
}
#endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
288
a08ee875
LG
289#if defined(CONFIG_FS_POSIX_ACL)
290/*
291 * These functions safely approximates the behavior of posix_acl_release()
292 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
293 * The in-kernel version, which can access the RCU, frees the ACLs after
294 * the grace period expires. Because we're unsure how long that grace
295 * period may be this implementation conservatively delays for 60 seconds.
296 * This is several orders of magnitude larger than expected grace period.
297 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
298 */
42f7b73b
AX
299#ifdef refcount_t
300#undef refcount_t
301#endif
302
a08ee875 303#include <linux/posix_acl.h>
a08ee875
LG
304
305#if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
a08ee875 306#define zpl_posix_acl_release(arg) posix_acl_release(arg)
a08ee875 307#else
68d83c55 308void zpl_posix_acl_release_impl(struct posix_acl *);
a08ee875
LG
309
310static inline void
311zpl_posix_acl_release(struct posix_acl *acl)
312{
313 if ((acl == NULL) || (acl == ACL_NOT_CACHED))
314 return;
42f7b73b
AX
315#ifdef HAVE_ACL_REFCOUNT
316 if (refcount_dec_and_test(&acl->a_refcount))
317 zpl_posix_acl_release_impl(acl);
318#else
68d83c55
AX
319 if (atomic_dec_and_test(&acl->a_refcount))
320 zpl_posix_acl_release_impl(acl);
42f7b73b 321#endif
a08ee875 322}
68d83c55 323#endif /* HAVE_POSIX_ACL_RELEASE */
a08ee875 324
68d83c55
AX
325#ifdef HAVE_SET_CACHED_ACL_USABLE
326#define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
327#define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
328#else
a08ee875 329static inline void
cae5b340
AX
330zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
331{
a08ee875
LG
332 struct posix_acl *older = NULL;
333
334 spin_lock(&ip->i_lock);
335
336 if ((newer != ACL_NOT_CACHED) && (newer != NULL))
337 posix_acl_dup(newer);
338
339 switch (type) {
340 case ACL_TYPE_ACCESS:
341 older = ip->i_acl;
342 rcu_assign_pointer(ip->i_acl, newer);
343 break;
344 case ACL_TYPE_DEFAULT:
345 older = ip->i_default_acl;
346 rcu_assign_pointer(ip->i_default_acl, newer);
347 break;
348 }
349
350 spin_unlock(&ip->i_lock);
351
352 zpl_posix_acl_release(older);
a08ee875
LG
353}
354
355static inline void
cae5b340
AX
356zpl_forget_cached_acl(struct inode *ip, int type)
357{
a08ee875
LG
358 zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
359}
68d83c55 360#endif /* HAVE_SET_CACHED_ACL_USABLE */
a08ee875 361
ea04106b
AX
362#ifndef HAVE___POSIX_ACL_CHMOD
363#ifdef HAVE_POSIX_ACL_CHMOD
364#define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
365#define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
a08ee875 366#else
a08ee875 367static inline int
cae5b340
AX
368__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode)
369{
a08ee875
LG
370 struct posix_acl *oldacl = *acl;
371 mode_t mode = umode;
372 int error;
373
374 *acl = posix_acl_clone(*acl, flags);
375 zpl_posix_acl_release(oldacl);
376
377 if (!(*acl))
378 return (-ENOMEM);
379
380 error = posix_acl_chmod_masq(*acl, mode);
381 if (error) {
382 zpl_posix_acl_release(*acl);
383 *acl = NULL;
384 }
385
386 return (error);
387}
388
389static inline int
cae5b340
AX
390__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep)
391{
a08ee875
LG
392 struct posix_acl *oldacl = *acl;
393 mode_t mode = *umodep;
394 int error;
395
396 *acl = posix_acl_clone(*acl, flags);
397 zpl_posix_acl_release(oldacl);
398
399 if (!(*acl))
400 return (-ENOMEM);
401
402 error = posix_acl_create_masq(*acl, &mode);
403 *umodep = mode;
404
405 if (error < 0) {
406 zpl_posix_acl_release(*acl);
407 *acl = NULL;
408 }
409
410 return (error);
411}
412#endif /* HAVE_POSIX_ACL_CHMOD */
ea04106b
AX
413#endif /* HAVE___POSIX_ACL_CHMOD */
414
415#ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
416typedef umode_t zpl_equivmode_t;
417#else
418typedef mode_t zpl_equivmode_t;
419#endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */
a08ee875 420
87dac73d
AX
421/*
422 * 4.8 API change,
423 * posix_acl_valid() now must be passed a namespace, the namespace from
424 * from super block associated with the given inode is used for this purpose.
425 */
426#ifdef HAVE_POSIX_ACL_VALID_WITH_NS
427#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
428#else
429#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
430#endif
431
42f7b73b
AX
432#define refcount_t zfs_refcount_t
433
87dac73d 434#endif /* CONFIG_FS_POSIX_ACL */
a08ee875 435
ea04106b
AX
436/*
437 * 2.6.38 API change,
438 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
439 */
440#ifdef HAVE_INODE_OWNER_OR_CAPABLE
441#define zpl_inode_owner_or_capable(ip) inode_owner_or_capable(ip)
a08ee875 442#else
ea04106b
AX
443#define zpl_inode_owner_or_capable(ip) is_owner_or_cap(ip)
444#endif /* HAVE_INODE_OWNER_OR_CAPABLE */
445
446/*
447 * 3.19 API change
448 * struct access f->f_dentry->d_inode was replaced by accessor function
449 * file_inode(f)
450 */
451#ifndef HAVE_FILE_INODE
452static inline struct inode *file_inode(const struct file *f)
453{
454 return (f->f_dentry->d_inode);
455}
456#endif /* HAVE_FILE_INODE */
a08ee875 457
cae5b340
AX
458/*
459 * 4.1 API change
460 * struct access file->f_path.dentry was replaced by accessor function
461 * file_dentry(f)
462 */
463#ifndef HAVE_FILE_DENTRY
464static inline struct dentry *file_dentry(const struct file *f)
465{
466 return (f->f_path.dentry);
467}
468#endif /* HAVE_FILE_DENTRY */
469
470#ifdef HAVE_KUID_HELPERS
471static inline uid_t zfs_uid_read_impl(struct inode *ip)
472{
473#ifdef HAVE_SUPER_USER_NS
474 return (from_kuid(ip->i_sb->s_user_ns, ip->i_uid));
475#else
476 return (from_kuid(kcred->user_ns, ip->i_uid));
477#endif
478}
479
480static inline uid_t zfs_uid_read(struct inode *ip)
481{
482 return (zfs_uid_read_impl(ip));
483}
484
485static inline gid_t zfs_gid_read_impl(struct inode *ip)
486{
487#ifdef HAVE_SUPER_USER_NS
488 return (from_kgid(ip->i_sb->s_user_ns, ip->i_gid));
489#else
490 return (from_kgid(kcred->user_ns, ip->i_gid));
491#endif
492}
493
494static inline gid_t zfs_gid_read(struct inode *ip)
495{
496 return (zfs_gid_read_impl(ip));
497}
498
499static inline void zfs_uid_write(struct inode *ip, uid_t uid)
500{
501#ifdef HAVE_SUPER_USER_NS
502 ip->i_uid = make_kuid(ip->i_sb->s_user_ns, uid);
503#else
504 ip->i_uid = make_kuid(kcred->user_ns, uid);
505#endif
506}
507
508static inline void zfs_gid_write(struct inode *ip, gid_t gid)
509{
510#ifdef HAVE_SUPER_USER_NS
511 ip->i_gid = make_kgid(ip->i_sb->s_user_ns, gid);
512#else
513 ip->i_gid = make_kgid(kcred->user_ns, gid);
514#endif
515}
516
517#else
518static inline uid_t zfs_uid_read(struct inode *ip)
519{
520 return (ip->i_uid);
521}
522
523static inline gid_t zfs_gid_read(struct inode *ip)
524{
525 return (ip->i_gid);
526}
527
528static inline void zfs_uid_write(struct inode *ip, uid_t uid)
529{
530 ip->i_uid = uid;
531}
532
533static inline void zfs_gid_write(struct inode *ip, gid_t gid)
534{
535 ip->i_gid = gid;
536}
537#endif
538
e10b0808
AX
/*
 * 2.6.38 API change,
 * follow_down() was renamed follow_down_one(); map to whichever name
 * the running kernel provides.
 */
#ifdef HAVE_FOLLOW_DOWN_ONE
#define zpl_follow_down_one(path)	follow_down_one(path)
#define zpl_follow_up(path)		follow_up(path)
#else
#define zpl_follow_down_one(path)	follow_down(path)
#define zpl_follow_up(path)		follow_up(path)
#endif
549
68d83c55
AX
550/*
551 * 4.9 API change
552 */
553#ifndef HAVE_SETATTR_PREPARE
554static inline int
555setattr_prepare(struct dentry *dentry, struct iattr *ia)
556{
557 return (inode_change_ok(dentry->d_inode, ia));
558}
559#endif
560
22929307
AX
/*
 * 4.11 API change,
 * These macros are defined by kernel 4.11.  Define them here so the
 * same code builds under kernels < 4.11 and >= 4.11.  They are set
 * to 0 so any accidental reliance on them under an older kernel
 * produces an obvious failure rather than silent misbehavior.
 */
#ifndef STATX_BASIC_STATS
#define STATX_BASIC_STATS	0
#endif

#ifndef AT_STATX_SYNC_AS_STAT
#define AT_STATX_SYNC_AS_STAT	0
#endif
576
577/*
578 * 4.11 API change
579 * 4.11 takes struct path *, < 4.11 takes vfsmount *
580 */
581
582#ifdef HAVE_VFSMOUNT_IOPS_GETATTR
583#define ZPL_GETATTR_WRAPPER(func) \
584static int \
585func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) \
586{ \
587 struct path path = { .mnt = mnt, .dentry = dentry }; \
588 return func##_impl(&path, stat, STATX_BASIC_STATS, \
589 AT_STATX_SYNC_AS_STAT); \
590}
591#elif defined(HAVE_PATH_IOPS_GETATTR)
592#define ZPL_GETATTR_WRAPPER(func) \
593static int \
594func(const struct path *path, struct kstat *stat, u32 request_mask, \
595 unsigned int query_flags) \
596{ \
597 return (func##_impl(path, stat, request_mask, query_flags)); \
598}
599#else
600#error
601#endif
602
603/*
604 * 4.9 API change
605 * Preferred interface to get the current FS time.
606 */
607#if !defined(HAVE_CURRENT_TIME)
608static inline struct timespec
609current_time(struct inode *ip)
610{
611 return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
612}
613#endif
614
42f7b73b
AX
615/*
616 * 4.16 API change
617 * Added iversion interface for managing inode version field.
618 */
619#ifdef HAVE_INODE_SET_IVERSION
620#include <linux/iversion.h>
621#else
622static inline void
623inode_set_iversion(struct inode *ip, u64 val)
624{
625 ip->i_version = val;
626}
627#endif
628
7268e1be 629#endif /* _ZFS_VFS_H */