]> git.proxmox.com Git - mirror_zfs.git/blob - include/linux/vfs_compat.h
Linux 5.0 compat: ASM_BUG macro
[mirror_zfs.git] / include / linux / vfs_compat.h
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
24 * Copyright (C) 2015 Jörg Thalheim.
25 */
26
27 #ifndef _ZFS_VFS_H
28 #define _ZFS_VFS_H
29
30 #include <sys/taskq.h>
31 #include <sys/cred.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compat.h>
34
35 /*
36 * 2.6.28 API change,
37 * Added insert_inode_locked() helper function, prior to this most callers
38 * used insert_inode_hash(). The older method doesn't check for collisions
 * in the inode_hashtable, but it is still acceptable for use.
40 */
41 #ifndef HAVE_INSERT_INODE_LOCKED
static inline int
insert_inode_locked(struct inode *ip)
{
	/*
	 * Fallback for kernels older than 2.6.28 which lack
	 * insert_inode_locked(): hash the inode without a collision
	 * check and report success unconditionally.
	 */
	insert_inode_hash(ip);
	return (0);
}
48 #endif /* HAVE_INSERT_INODE_LOCKED */
49
50 /*
51 * 2.6.35 API change,
52 * Add truncate_setsize() if it is not exported by the Linux kernel.
53 *
54 * Truncate the inode and pages associated with the inode. The pages are
55 * unmapped and removed from cache.
56 */
57 #ifndef HAVE_TRUNCATE_SETSIZE
static inline void
truncate_setsize(struct inode *ip, loff_t new)
{
	struct address_space *mapping = ip->i_mapping;

	/* Publish the new size before tearing down the page cache. */
	i_size_write(ip, new);

	/*
	 * Unmap any user mappings past the new size, drop the truncated
	 * pages from the page cache, then unmap a second time.  The
	 * second pass presumably catches pages faulted back in between
	 * the first unmap and the truncate -- NOTE(review): confirm
	 * against the in-kernel truncate_pagecache(), which uses the
	 * same unmap/truncate/unmap sequence.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
69 #endif /* HAVE_TRUNCATE_SETSIZE */
70
71 /*
72 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
73 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
74 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
75 * 4.12 - x.y, super_setup_bdi_name() new interface.
76 */
77 #if defined(HAVE_SUPER_SETUP_BDI_NAME)
78 extern atomic_long_t zfs_bdi_seq;
79
/*
 * 4.12+ interface: the kernel allocates and registers the bdi for us
 * under a unique "<name>-<seq>" identifier.
 */
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	return super_setup_bdi_name(sb, "%.28s-%ld", name,
	    atomic_long_inc_return(&zfs_bdi_seq));
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	/*
	 * Nothing to do: a bdi created by super_setup_bdi_name() is
	 * presumably released by the generic superblock teardown --
	 * NOTE(review): confirm against the 4.12+ kernel sources.
	 */
}
90 #elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
91 static inline int
92 zpl_bdi_setup(struct super_block *sb, char *name)
93 {
94 struct backing_dev_info *bdi;
95 int error;
96
97 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
98 error = bdi_setup_and_register(bdi, name);
99 if (error) {
100 kmem_free(bdi, sizeof (struct backing_dev_info));
101 return (error);
102 }
103
104 sb->s_bdi = bdi;
105
106 return (0);
107 }
108 static inline void
109 zpl_bdi_destroy(struct super_block *sb)
110 {
111 struct backing_dev_info *bdi = sb->s_bdi;
112
113 bdi_destroy(bdi);
114 kmem_free(bdi, sizeof (struct backing_dev_info));
115 sb->s_bdi = NULL;
116 }
117 #elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
118 static inline int
119 zpl_bdi_setup(struct super_block *sb, char *name)
120 {
121 struct backing_dev_info *bdi;
122 int error;
123
124 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
125 error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
126 if (error) {
127 kmem_free(sb->s_bdi, sizeof (struct backing_dev_info));
128 return (error);
129 }
130
131 sb->s_bdi = bdi;
132
133 return (0);
134 }
135 static inline void
136 zpl_bdi_destroy(struct super_block *sb)
137 {
138 struct backing_dev_info *bdi = sb->s_bdi;
139
140 bdi_destroy(bdi);
141 kmem_free(bdi, sizeof (struct backing_dev_info));
142 sb->s_bdi = NULL;
143 }
144 #else
145 extern atomic_long_t zfs_bdi_seq;
146
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	/* 2.6.32 - 2.6.33: open-code bdi_setup_and_register(). */
	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	bdi->name = name;
	bdi->capabilities = BDI_CAP_MAP_COPY;

	error = bdi_init(bdi);
	if (error) {
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	/* Register under a unique "<name>-<seq>" identifier. */
	error = bdi_register(bdi, NULL, "%.28s-%ld", name,
	    atomic_long_inc_return(&zfs_bdi_seq));
	if (error) {
		/* Undo bdi_init() before releasing the allocation. */
		bdi_destroy(bdi);
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
175 static inline void
176 zpl_bdi_destroy(struct super_block *sb)
177 {
178 struct backing_dev_info *bdi = sb->s_bdi;
179
180 bdi_destroy(bdi);
181 kmem_free(bdi, sizeof (struct backing_dev_info));
182 sb->s_bdi = NULL;
183 }
184 #endif
185
186 /*
187 * 4.14 adds SB_* flag definitions, define them to MS_* equivalents
188 * if not set.
189 */
190 #ifndef SB_RDONLY
191 #define SB_RDONLY MS_RDONLY
192 #endif
193
194 #ifndef SB_SILENT
195 #define SB_SILENT MS_SILENT
196 #endif
197
198 #ifndef SB_ACTIVE
199 #define SB_ACTIVE MS_ACTIVE
200 #endif
201
202 #ifndef SB_POSIXACL
203 #define SB_POSIXACL MS_POSIXACL
204 #endif
205
206 #ifndef SB_MANDLOCK
207 #define SB_MANDLOCK MS_MANDLOCK
208 #endif
209
210 #ifndef SB_NOATIME
211 #define SB_NOATIME MS_NOATIME
212 #endif
213
214 /*
215 * 2.6.38 API change,
216 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases.
217 */
218 #ifndef LOOKUP_RCU
219 #define LOOKUP_RCU 0x0
220 #endif /* LOOKUP_RCU */
221
222 /*
223 * 3.2-rc1 API change,
224 * Add set_nlink() if it is not exported by the Linux kernel.
225 *
226 * i_nlink is read-only in Linux 3.2, but it can be set directly in
227 * earlier kernels.
228 */
229 #ifndef HAVE_SET_NLINK
static inline void
set_nlink(struct inode *inode, unsigned int nlink)
{
	/* Pre-3.2 kernels allow i_nlink to be written directly. */
	inode->i_nlink = nlink;
}
235 #endif /* HAVE_SET_NLINK */
236
237 /*
238 * 3.3 API change,
239 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
240 * umode_t type rather than an int. To cleanly handle both definitions
241 * the zpl_umode_t type is introduced and set accordingly.
242 */
243 #ifdef HAVE_MKDIR_UMODE_T
244 typedef umode_t zpl_umode_t;
245 #else
246 typedef int zpl_umode_t;
247 #endif
248
249 /*
250 * 3.5 API change,
251 * The clear_inode() function replaces end_writeback() and introduces an
252 * ordering change regarding when the inode_sync_wait() occurs. See the
253 * configure check in config/kernel-clear-inode.m4 for full details.
254 */
255 #if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
256 #define clear_inode(ip) end_writeback(ip)
257 #endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */
258
259 /*
260 * 3.6 API change,
261 * The sget() helper function now takes the mount flags as an argument.
262 */
263 #ifdef HAVE_5ARG_SGET
264 #define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd)
265 #else
266 #define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd)
267 #endif /* HAVE_5ARG_SGET */
268
269 #if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
270 static inline loff_t
271 lseek_execute(
272 struct file *filp,
273 struct inode *inode,
274 loff_t offset,
275 loff_t maxsize)
276 {
277 if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
278 return (-EINVAL);
279
280 if (offset > maxsize)
281 return (-EINVAL);
282
283 if (offset != filp->f_pos) {
284 spin_lock(&filp->f_lock);
285 filp->f_pos = offset;
286 filp->f_version = 0;
287 spin_unlock(&filp->f_lock);
288 }
289
290 return (offset);
291 }
292 #endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
293
294 #if defined(CONFIG_FS_POSIX_ACL)
295 /*
 * These functions safely approximate the behavior of posix_acl_release()
297 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
298 * The in-kernel version, which can access the RCU, frees the ACLs after
299 * the grace period expires. Because we're unsure how long that grace
300 * period may be this implementation conservatively delays for 60 seconds.
301 * This is several orders of magnitude larger than expected grace period.
302 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
303 */
304
305 #include <linux/posix_acl.h>
306
307 #if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
308 #define zpl_posix_acl_release(arg) posix_acl_release(arg)
309 #else
310 void zpl_posix_acl_release_impl(struct posix_acl *);
311
static inline void
zpl_posix_acl_release(struct posix_acl *acl)
{
	/* NULL and the ACL_NOT_CACHED sentinel carry no reference. */
	if ((acl == NULL) || (acl == ACL_NOT_CACHED))
		return;
	/*
	 * Drop our reference; the final holder hands the ACL to
	 * zpl_posix_acl_release_impl() for the delayed free described
	 * in the comment at the top of this section.
	 */
#ifdef HAVE_ACL_REFCOUNT
	if (refcount_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#else
	if (atomic_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#endif
}
325 #endif /* HAVE_POSIX_ACL_RELEASE */
326
327 #ifdef HAVE_SET_CACHED_ACL_USABLE
328 #define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
329 #define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
330 #else
static inline void
zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
{
	struct posix_acl *older = NULL;

	spin_lock(&ip->i_lock);

	/* Take a reference on the new ACL (NULL/sentinel excluded). */
	if ((newer != ACL_NOT_CACHED) && (newer != NULL))
		posix_acl_dup(newer);

	/* Swap the cached ACL pointer for the requested type. */
	switch (type) {
	case ACL_TYPE_ACCESS:
		older = ip->i_acl;
		rcu_assign_pointer(ip->i_acl, newer);
		break;
	case ACL_TYPE_DEFAULT:
		older = ip->i_default_acl;
		rcu_assign_pointer(ip->i_default_acl, newer);
		break;
	}

	spin_unlock(&ip->i_lock);

	/* Drop the cache's reference on the previous ACL, if any. */
	zpl_posix_acl_release(older);
}
356
static inline void
zpl_forget_cached_acl(struct inode *ip, int type)
{
	/* Reset the cache slot to the "not cached" sentinel. */
	zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
}
362 #endif /* HAVE_SET_CACHED_ACL_USABLE */
363
364 #ifndef HAVE___POSIX_ACL_CHMOD
365 #ifdef HAVE_POSIX_ACL_CHMOD
366 #define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
367 #define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
368 #else
static inline int
__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode)
{
	struct posix_acl *oldacl = *acl;
	mode_t mode = umode;
	int error;

	/*
	 * Work on a private clone; the caller's reference on the
	 * original ACL is dropped regardless of the outcome.
	 */
	*acl = posix_acl_clone(*acl, flags);
	zpl_posix_acl_release(oldacl);

	if (!(*acl))
		return (-ENOMEM);

	/* Apply the new mode; on failure release and clear the clone. */
	error = posix_acl_chmod_masq(*acl, mode);
	if (error) {
		zpl_posix_acl_release(*acl);
		*acl = NULL;
	}

	return (error);
}
390
static inline int
__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep)
{
	struct posix_acl *oldacl = *acl;
	mode_t mode = *umodep;
	int error;

	/*
	 * Work on a private clone; the caller's reference on the
	 * original ACL is dropped regardless of the outcome.
	 */
	*acl = posix_acl_clone(*acl, flags);
	zpl_posix_acl_release(oldacl);

	if (!(*acl))
		return (-ENOMEM);

	error = posix_acl_create_masq(*acl, &mode);
	/* Report the possibly-modified mode back to the caller. */
	*umodep = mode;

	/*
	 * Only a negative result is an error; posix_acl_create_masq()
	 * may return a non-negative value which keeps the clone.
	 */
	if (error < 0) {
		zpl_posix_acl_release(*acl);
		*acl = NULL;
	}

	return (error);
}
414 #endif /* HAVE_POSIX_ACL_CHMOD */
415 #endif /* HAVE___POSIX_ACL_CHMOD */
416
417 #ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
418 typedef umode_t zpl_equivmode_t;
419 #else
420 typedef mode_t zpl_equivmode_t;
421 #endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */
422
423 /*
424 * 4.8 API change,
425 * posix_acl_valid() now must be passed a namespace, the namespace from
 * the super block associated with the given inode is used for this purpose.
427 */
428 #ifdef HAVE_POSIX_ACL_VALID_WITH_NS
429 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
430 #else
431 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
432 #endif
433
434 #endif /* CONFIG_FS_POSIX_ACL */
435
436 /*
437 * 2.6.38 API change,
438 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
439 */
440 #ifdef HAVE_INODE_OWNER_OR_CAPABLE
441 #define zpl_inode_owner_or_capable(ip) inode_owner_or_capable(ip)
442 #else
443 #define zpl_inode_owner_or_capable(ip) is_owner_or_cap(ip)
444 #endif /* HAVE_INODE_OWNER_OR_CAPABLE */
445
446 /*
447 * 3.19 API change
448 * struct access f->f_dentry->d_inode was replaced by accessor function
449 * file_inode(f)
450 */
451 #ifndef HAVE_FILE_INODE
static inline struct inode *file_inode(const struct file *f)
{
	/* Pre-3.19 fallback: reach the inode through f_dentry. */
	return (f->f_dentry->d_inode);
}
456 #endif /* HAVE_FILE_INODE */
457
458 /*
459 * 4.1 API change
460 * struct access file->f_path.dentry was replaced by accessor function
461 * file_dentry(f)
462 */
463 #ifndef HAVE_FILE_DENTRY
static inline struct dentry *file_dentry(const struct file *f)
{
	/* Pre-4.1 fallback: reach the dentry through f_path. */
	return (f->f_path.dentry);
}
468 #endif /* HAVE_FILE_DENTRY */
469
470 #ifdef HAVE_KUID_HELPERS
/*
 * Kernels with kuid/kgid helpers store namespace-mapped ids in the
 * inode.  Translate between those and plain uid_t/gid_t values using
 * the superblock's user namespace when available, otherwise the
 * namespace of the kcred credential.
 */
static inline uid_t zfs_uid_read_impl(struct inode *ip)
{
#ifdef HAVE_SUPER_USER_NS
	return (from_kuid(ip->i_sb->s_user_ns, ip->i_uid));
#else
	return (from_kuid(kcred->user_ns, ip->i_uid));
#endif
}

static inline uid_t zfs_uid_read(struct inode *ip)
{
	return (zfs_uid_read_impl(ip));
}

static inline gid_t zfs_gid_read_impl(struct inode *ip)
{
#ifdef HAVE_SUPER_USER_NS
	return (from_kgid(ip->i_sb->s_user_ns, ip->i_gid));
#else
	return (from_kgid(kcred->user_ns, ip->i_gid));
#endif
}

static inline gid_t zfs_gid_read(struct inode *ip)
{
	return (zfs_gid_read_impl(ip));
}

static inline void zfs_uid_write(struct inode *ip, uid_t uid)
{
#ifdef HAVE_SUPER_USER_NS
	ip->i_uid = make_kuid(ip->i_sb->s_user_ns, uid);
#else
	ip->i_uid = make_kuid(kcred->user_ns, uid);
#endif
}

static inline void zfs_gid_write(struct inode *ip, gid_t gid)
{
#ifdef HAVE_SUPER_USER_NS
	ip->i_gid = make_kgid(ip->i_sb->s_user_ns, gid);
#else
	ip->i_gid = make_kgid(kcred->user_ns, gid);
#endif
}
516
517 #else
/*
 * Kernels without kuid/kgid helpers store plain uid_t/gid_t values in
 * the inode; read and write them directly.
 */
static inline uid_t zfs_uid_read(struct inode *ip)
{
	return (ip->i_uid);
}

static inline gid_t zfs_gid_read(struct inode *ip)
{
	return (ip->i_gid);
}

static inline void zfs_uid_write(struct inode *ip, uid_t uid)
{
	ip->i_uid = uid;
}

static inline void zfs_gid_write(struct inode *ip, gid_t gid)
{
	ip->i_gid = gid;
}
537 #endif
538
539 /*
540 * 2.6.38 API change
541 */
542 #ifdef HAVE_FOLLOW_DOWN_ONE
543 #define zpl_follow_down_one(path) follow_down_one(path)
544 #define zpl_follow_up(path) follow_up(path)
545 #else
546 #define zpl_follow_down_one(path) follow_down(path)
547 #define zpl_follow_up(path) follow_up(path)
548 #endif
549
550 /*
551 * 4.9 API change
552 */
553 #ifndef HAVE_SETATTR_PREPARE
static inline int
setattr_prepare(struct dentry *dentry, struct iattr *ia)
{
	/*
	 * Pre-4.9 fallback: inode_change_ok() is the older name for
	 * the attribute-change validation this helper performs.
	 */
	return (inode_change_ok(dentry->d_inode, ia));
}
559 #endif
560
561 /*
562 * 4.11 API change
563 * These macros are defined by kernel 4.11. We define them so that the same
564 * code builds under kernels < 4.11 and >= 4.11. The macros are set to 0 so
565 * that it will create obvious failures if they are accidentally used when built
566 * against a kernel >= 4.11.
567 */
568
569 #ifndef STATX_BASIC_STATS
570 #define STATX_BASIC_STATS 0
571 #endif
572
573 #ifndef AT_STATX_SYNC_AS_STAT
574 #define AT_STATX_SYNC_AS_STAT 0
575 #endif
576
577 /*
578 * 4.11 API change
579 * 4.11 takes struct path *, < 4.11 takes vfsmount *
580 */
581
582 #ifdef HAVE_VFSMOUNT_IOPS_GETATTR
583 #define ZPL_GETATTR_WRAPPER(func) \
584 static int \
585 func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) \
586 { \
587 struct path path = { .mnt = mnt, .dentry = dentry }; \
588 return func##_impl(&path, stat, STATX_BASIC_STATS, \
589 AT_STATX_SYNC_AS_STAT); \
590 }
591 #elif defined(HAVE_PATH_IOPS_GETATTR)
592 #define ZPL_GETATTR_WRAPPER(func) \
593 static int \
594 func(const struct path *path, struct kstat *stat, u32 request_mask, \
595 unsigned int query_flags) \
596 { \
597 return (func##_impl(path, stat, request_mask, query_flags)); \
598 }
599 #else
600 #error
601 #endif
602
603 /*
604 * 4.9 API change
605 * Preferred interface to get the current FS time.
606 */
607 #if !defined(HAVE_CURRENT_TIME)
static inline struct timespec
current_time(struct inode *ip)
{
	/*
	 * Pre-4.9 fallback: current kernel time truncated to the
	 * superblock's timestamp granularity.
	 */
	return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
}
613 #endif
614
615 /*
616 * 4.16 API change
617 * Added iversion interface for managing inode version field.
618 */
619 #ifdef HAVE_INODE_SET_IVERSION
620 #include <linux/iversion.h>
621 #else
static inline void
inode_set_iversion(struct inode *ip, u64 val)
{
	/* Pre-4.16 kernels expose i_version as a plain field. */
	ip->i_version = val;
}
627 #endif
628
629 /*
630 * Returns true when called in the context of a 32-bit system call.
631 */
static inline int
zpl_is_32bit_api(void)
{
#ifdef CONFIG_COMPAT
#ifdef HAVE_IN_COMPAT_SYSCALL
	/* Newer spelling of the compat-task test. */
	return (in_compat_syscall());
#else
	return (is_compat_task());
#endif
#else
	/*
	 * Without compat support, only a 32-bit kernel can receive
	 * 32-bit system calls.
	 */
	return (BITS_PER_LONG == 32);
#endif
}
645
646 #endif /* _ZFS_VFS_H */