/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2015 Jörg Thalheim.
 */

#ifndef _ZFS_VFS_H
#define _ZFS_VFS_H

#include <sys/taskq.h>
#include <sys/cred.h>
#include <linux/backing-dev.h>

/*
 * 2.6.28 API change,
 * Added the insert_inode_locked() helper function.  Prior to this most
 * callers used insert_inode_hash().  The older method doesn't check for
 * collisions in the inode_hashtable but it is still acceptable for use.
 */
#ifndef HAVE_INSERT_INODE_LOCKED
static inline int
insert_inode_locked(struct inode *ip)
{
	insert_inode_hash(ip);
	return (0);
}
#endif /* HAVE_INSERT_INODE_LOCKED */

/*
 * 2.6.35 API change,
 * Add truncate_setsize() if it is not exported by the Linux kernel.
 *
 * Truncate the inode and pages associated with the inode. The pages are
 * unmapped and removed from cache.
 */
#ifndef HAVE_TRUNCATE_SETSIZE
static inline void
truncate_setsize(struct inode *ip, loff_t new)
{
	struct address_space *mapping = ip->i_mapping;

	i_size_write(ip, new);

	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
#endif /* HAVE_TRUNCATE_SETSIZE */
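
/*
 * Illustrative sketch (not part of the original header): a setattr-style
 * shrink path might call truncate_setsize() like this.  The helper name
 * zpl_example_truncate is hypothetical.
 */
#if 0
static int
zpl_example_truncate(struct inode *ip, loff_t new_size)
{
	/* Updates i_size and drops now out-of-range pages and mappings. */
	truncate_setsize(ip, new_size);
	return (0);
}
#endif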

/*
 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
 * 4.12 - x.y, super_setup_bdi_name() new interface.
 */
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
extern atomic_long_t zfs_bdi_seq;

static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	return super_setup_bdi_name(sb, "%.28s-%ld", name,
	    atomic_long_inc_return(&zfs_bdi_seq));
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
}
#elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	error = bdi_setup_and_register(bdi, name);
	if (error) {
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	bdi_destroy(bdi);
	kmem_free(bdi, sizeof (struct backing_dev_info));
	sb->s_bdi = NULL;
}
#elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
	if (error) {
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	bdi_destroy(bdi);
	kmem_free(bdi, sizeof (struct backing_dev_info));
	sb->s_bdi = NULL;
}
#else
extern atomic_long_t zfs_bdi_seq;

static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	bdi->name = name;
	bdi->capabilities = BDI_CAP_MAP_COPY;

	error = bdi_init(bdi);
	if (error) {
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	error = bdi_register(bdi, NULL, "%.28s-%ld", name,
	    atomic_long_inc_return(&zfs_bdi_seq));
	if (error) {
		bdi_destroy(bdi);
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	bdi_destroy(bdi);
	kmem_free(bdi, sizeof (struct backing_dev_info));
	sb->s_bdi = NULL;
}
#endif
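
/*
 * Illustrative sketch (not part of the original header): whichever branch
 * above is compiled in, callers pair zpl_bdi_setup() in the fill_super path
 * with zpl_bdi_destroy() at unmount.  The zpl_example_* names are
 * hypothetical.
 */
#if 0
static int
zpl_example_fill_super(struct super_block *sb)
{
	int error;

	error = zpl_bdi_setup(sb, "zfs");
	if (error)
		return (error);

	/* ... remaining superblock setup ... */
	return (0);
}

static void
zpl_example_kill_sb(struct super_block *sb)
{
	/* ... tear down superblock state ... */
	zpl_bdi_destroy(sb);
}
#endif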

/*
 * 4.14 adds SB_* flag definitions, define them to MS_* equivalents
 * if not set.
 */
#ifndef SB_RDONLY
#define SB_RDONLY MS_RDONLY
#endif

#ifndef SB_SILENT
#define SB_SILENT MS_SILENT
#endif

#ifndef SB_ACTIVE
#define SB_ACTIVE MS_ACTIVE
#endif

#ifndef SB_POSIXACL
#define SB_POSIXACL MS_POSIXACL
#endif

#ifndef SB_MANDLOCK
#define SB_MANDLOCK MS_MANDLOCK
#endif
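
/*
 * Illustrative sketch (not part of the original header): with the fallback
 * definitions above the same flag test builds against both pre-4.14 (MS_*)
 * and post-4.14 (SB_*) kernels.  zpl_example_is_readonly is a hypothetical
 * helper.
 */
#if 0
static inline int
zpl_example_is_readonly(struct super_block *sb)
{
	return ((sb->s_flags & SB_RDONLY) != 0);
}
#endif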

/*
 * 2.6.38 API change,
 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases.
 */
#ifndef LOOKUP_RCU
#define LOOKUP_RCU 0x0
#endif /* LOOKUP_RCU */

/*
 * 3.2-rc1 API change,
 * Add set_nlink() if it is not exported by the Linux kernel.
 *
 * i_nlink is read-only in Linux 3.2, but it can be set directly in
 * earlier kernels.
 */
#ifndef HAVE_SET_NLINK
static inline void
set_nlink(struct inode *inode, unsigned int nlink)
{
	inode->i_nlink = nlink;
}
#endif /* HAVE_SET_NLINK */

/*
 * 3.3 API change,
 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
 * umode_t type rather than an int. To cleanly handle both definitions
 * the zpl_umode_t type is introduced and set accordingly.
 */
#ifdef HAVE_MKDIR_UMODE_T
typedef umode_t zpl_umode_t;
#else
typedef int zpl_umode_t;
#endif
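
/*
 * Illustrative sketch (not part of the original header): declaring a mkdir
 * callback with zpl_umode_t lets one prototype match both the pre-3.3 int
 * and post-3.3 umode_t signatures.  zpl_example_mkdir is a hypothetical
 * name.
 */
#if 0
static int
zpl_example_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode)
{
	/* ... create the directory with the requested mode ... */
	return (0);
}
#endif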

/*
 * 3.5 API change,
 * The clear_inode() function replaces end_writeback() and introduces an
 * ordering change regarding when the inode_sync_wait() occurs. See the
 * configure check in config/kernel-clear-inode.m4 for full details.
 */
#if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
#define clear_inode(ip) end_writeback(ip)
#endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */

/*
 * 3.6 API change,
 * The sget() helper function now takes the mount flags as an argument.
 */
#ifdef HAVE_5ARG_SGET
#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd)
#else
#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd)
#endif /* HAVE_5ARG_SGET */
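
/*
 * Illustrative sketch (not part of the original header): a mount path can
 * call zpl_sget() without caring whether the running kernel's sget() takes
 * the mount flags.  The zpl_example_* names, including the test and set
 * callbacks, are hypothetical.
 */
#if 0
static struct super_block *
zpl_example_mount(struct file_system_type *fs_type, int flags, void *data)
{
	return (zpl_sget(fs_type, zpl_example_test, zpl_example_set,
	    flags, data));
}
#endif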

#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
static inline loff_t
lseek_execute(
	struct file *filp,
	struct inode *inode,
	loff_t offset,
	loff_t maxsize)
{
	if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
		return (-EINVAL);

	if (offset > maxsize)
		return (-EINVAL);

	if (offset != filp->f_pos) {
		spin_lock(&filp->f_lock);
		filp->f_pos = offset;
		filp->f_version = 0;
		spin_unlock(&filp->f_lock);
	}

	return (offset);
}
#endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
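
/*
 * Illustrative sketch (not part of the original header): a SEEK_HOLE/
 * SEEK_DATA aware llseek handler would resolve the target offset and then
 * let lseek_execute() validate and publish the new file position.  The
 * zpl_example_llseek name is hypothetical.
 */
#if 0
static loff_t
zpl_example_llseek(struct file *filp, loff_t offset, int whence)
{
	struct inode *ip = filp->f_mapping->host;
	loff_t maxbytes = ip->i_sb->s_maxbytes;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		/* ... map offset to the next data/hole boundary ... */
		return (lseek_execute(filp, ip, offset, maxbytes));
	}

	return (generic_file_llseek(filp, offset, whence));
}
#endif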

#if defined(CONFIG_FS_POSIX_ACL)
/*
 * These functions safely approximate the behavior of posix_acl_release(),
 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
 * The in-kernel version, which can use RCU, frees the ACLs after the
 * grace period expires.  Because we're unsure how long that grace period
 * may be, this implementation conservatively delays for 60 seconds.  That
 * is several orders of magnitude larger than the expected grace period.
 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
 */
#ifdef refcount_t
#undef refcount_t
#endif

#include <linux/posix_acl.h>

#if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
#define zpl_posix_acl_release(arg) posix_acl_release(arg)
#else
void zpl_posix_acl_release_impl(struct posix_acl *);

static inline void
zpl_posix_acl_release(struct posix_acl *acl)
{
	if ((acl == NULL) || (acl == ACL_NOT_CACHED))
		return;
#ifdef HAVE_ACL_REFCOUNT
	if (refcount_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#else
	if (atomic_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#endif
}
#endif /* HAVE_POSIX_ACL_RELEASE */

#ifdef HAVE_SET_CACHED_ACL_USABLE
#define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
#define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
#else
static inline void
zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
{
	struct posix_acl *older = NULL;

	spin_lock(&ip->i_lock);

	if ((newer != ACL_NOT_CACHED) && (newer != NULL))
		posix_acl_dup(newer);

	switch (type) {
	case ACL_TYPE_ACCESS:
		older = ip->i_acl;
		rcu_assign_pointer(ip->i_acl, newer);
		break;
	case ACL_TYPE_DEFAULT:
		older = ip->i_default_acl;
		rcu_assign_pointer(ip->i_default_acl, newer);
		break;
	}

	spin_unlock(&ip->i_lock);

	zpl_posix_acl_release(older);
}

static inline void
zpl_forget_cached_acl(struct inode *ip, int type)
{
	zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
}
#endif /* HAVE_SET_CACHED_ACL_USABLE */
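
/*
 * Illustrative sketch (not part of the original header): after an ACL xattr
 * is written, the in-core cache can be refreshed with zpl_set_cached_acl()
 * or invalidated with zpl_forget_cached_acl().  zpl_example_acl_update is
 * a hypothetical helper.
 */
#if 0
static void
zpl_example_acl_update(struct inode *ip, int type, struct posix_acl *acl)
{
	if (acl != NULL)
		zpl_set_cached_acl(ip, type, acl);
	else
		zpl_forget_cached_acl(ip, type);
}
#endif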

#ifndef HAVE___POSIX_ACL_CHMOD
#ifdef HAVE_POSIX_ACL_CHMOD
#define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
#define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
#else
static inline int
__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode)
{
	struct posix_acl *oldacl = *acl;
	mode_t mode = umode;
	int error;

	*acl = posix_acl_clone(*acl, flags);
	zpl_posix_acl_release(oldacl);

	if (!(*acl))
		return (-ENOMEM);

	error = posix_acl_chmod_masq(*acl, mode);
	if (error) {
		zpl_posix_acl_release(*acl);
		*acl = NULL;
	}

	return (error);
}

static inline int
__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep)
{
	struct posix_acl *oldacl = *acl;
	mode_t mode = *umodep;
	int error;

	*acl = posix_acl_clone(*acl, flags);
	zpl_posix_acl_release(oldacl);

	if (!(*acl))
		return (-ENOMEM);

	error = posix_acl_create_masq(*acl, &mode);
	*umodep = mode;

	if (error < 0) {
		zpl_posix_acl_release(*acl);
		*acl = NULL;
	}

	return (error);
}
#endif /* HAVE_POSIX_ACL_CHMOD */
#endif /* HAVE___POSIX_ACL_CHMOD */

#ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
typedef umode_t zpl_equivmode_t;
#else
typedef mode_t zpl_equivmode_t;
#endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */

/*
 * 4.8 API change,
 * posix_acl_valid() now must be passed a namespace; the namespace from
 * the super block associated with the given inode is used for this purpose.
 */
#ifdef HAVE_POSIX_ACL_VALID_WITH_NS
#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
#else
#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
#endif

#define refcount_t zfs_refcount_t

#endif /* CONFIG_FS_POSIX_ACL */

/*
 * 2.6.38 API change,
 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
 */
#ifdef HAVE_INODE_OWNER_OR_CAPABLE
#define zpl_inode_owner_or_capable(ip) inode_owner_or_capable(ip)
#else
#define zpl_inode_owner_or_capable(ip) is_owner_or_cap(ip)
#endif /* HAVE_INODE_OWNER_OR_CAPABLE */

/*
 * 3.19 API change
 * Direct access to f->f_dentry->d_inode was replaced by the accessor
 * function file_inode(f).
 */
#ifndef HAVE_FILE_INODE
static inline struct inode *file_inode(const struct file *f)
{
	return (f->f_dentry->d_inode);
}
#endif /* HAVE_FILE_INODE */

/*
 * 4.1 API change
 * Direct access to file->f_path.dentry was replaced by the accessor
 * function file_dentry(f).
 */
#ifndef HAVE_FILE_DENTRY
static inline struct dentry *file_dentry(const struct file *f)
{
	return (f->f_path.dentry);
}
#endif /* HAVE_FILE_DENTRY */

#ifdef HAVE_KUID_HELPERS
static inline uid_t zfs_uid_read_impl(struct inode *ip)
{
#ifdef HAVE_SUPER_USER_NS
	return (from_kuid(ip->i_sb->s_user_ns, ip->i_uid));
#else
	return (from_kuid(kcred->user_ns, ip->i_uid));
#endif
}

static inline uid_t zfs_uid_read(struct inode *ip)
{
	return (zfs_uid_read_impl(ip));
}

static inline gid_t zfs_gid_read_impl(struct inode *ip)
{
#ifdef HAVE_SUPER_USER_NS
	return (from_kgid(ip->i_sb->s_user_ns, ip->i_gid));
#else
	return (from_kgid(kcred->user_ns, ip->i_gid));
#endif
}

static inline gid_t zfs_gid_read(struct inode *ip)
{
	return (zfs_gid_read_impl(ip));
}

static inline void zfs_uid_write(struct inode *ip, uid_t uid)
{
#ifdef HAVE_SUPER_USER_NS
	ip->i_uid = make_kuid(ip->i_sb->s_user_ns, uid);
#else
	ip->i_uid = make_kuid(kcred->user_ns, uid);
#endif
}

static inline void zfs_gid_write(struct inode *ip, gid_t gid)
{
#ifdef HAVE_SUPER_USER_NS
	ip->i_gid = make_kgid(ip->i_sb->s_user_ns, gid);
#else
	ip->i_gid = make_kgid(kcred->user_ns, gid);
#endif
}

#else
static inline uid_t zfs_uid_read(struct inode *ip)
{
	return (ip->i_uid);
}

static inline gid_t zfs_gid_read(struct inode *ip)
{
	return (ip->i_gid);
}

static inline void zfs_uid_write(struct inode *ip, uid_t uid)
{
	ip->i_uid = uid;
}

static inline void zfs_gid_write(struct inode *ip, gid_t gid)
{
	ip->i_gid = gid;
}
#endif
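
/*
 * Illustrative sketch (not part of the original header): callers go through
 * the zfs_uid/zfs_gid accessors so the same code works whether the kernel
 * stores raw ids or namespace-aware kuid_t/kgid_t values.  zpl_example_chown
 * is a hypothetical helper.
 */
#if 0
static void
zpl_example_chown(struct inode *ip, uid_t uid, gid_t gid)
{
	if (zfs_uid_read(ip) != uid)
		zfs_uid_write(ip, uid);
	if (zfs_gid_read(ip) != gid)
		zfs_gid_write(ip, gid);
}
#endif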

/*
 * 2.6.38 API change,
 * The follow_down() function was renamed follow_down_one().
 */
#ifdef HAVE_FOLLOW_DOWN_ONE
#define zpl_follow_down_one(path) follow_down_one(path)
#define zpl_follow_up(path) follow_up(path)
#else
#define zpl_follow_down_one(path) follow_down(path)
#define zpl_follow_up(path) follow_up(path)
#endif

/*
 * 4.9 API change
 * The inode_change_ok() function was renamed setattr_prepare(); provide
 * the new name for older kernels.
 */
#ifndef HAVE_SETATTR_PREPARE
static inline int
setattr_prepare(struct dentry *dentry, struct iattr *ia)
{
	return (inode_change_ok(dentry->d_inode, ia));
}
#endif
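
/*
 * Illustrative sketch (not part of the original header): a setattr callback
 * validates the request with setattr_prepare() (backed by inode_change_ok()
 * on older kernels via the shim above) before applying it.
 * zpl_example_setattr is a hypothetical name.
 */
#if 0
static int
zpl_example_setattr(struct dentry *dentry, struct iattr *ia)
{
	int error;

	error = setattr_prepare(dentry, ia);
	if (error)
		return (error);

	/* ... apply the attribute changes ... */
	return (0);
}
#endif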

/*
 * 4.11 API change
 * These macros are defined by kernel 4.11.  We define them so that the same
 * code builds under kernels < 4.11 and >= 4.11.  The macros are set to 0 so
 * that they will create obvious failures if they are accidentally relied
 * upon when building against a kernel older than 4.11.
 */

#ifndef STATX_BASIC_STATS
#define STATX_BASIC_STATS 0
#endif

#ifndef AT_STATX_SYNC_AS_STAT
#define AT_STATX_SYNC_AS_STAT 0
#endif

/*
 * 4.11 API change
 * getattr() takes a struct path * on >= 4.11 and a vfsmount * on < 4.11.
 */

#ifdef HAVE_VFSMOUNT_IOPS_GETATTR
#define ZPL_GETATTR_WRAPPER(func) \
static int \
func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) \
{ \
	struct path path = { .mnt = mnt, .dentry = dentry }; \
	return func##_impl(&path, stat, STATX_BASIC_STATS, \
	    AT_STATX_SYNC_AS_STAT); \
}
#elif defined(HAVE_PATH_IOPS_GETATTR)
#define ZPL_GETATTR_WRAPPER(func) \
static int \
func(const struct path *path, struct kstat *stat, u32 request_mask, \
    unsigned int query_flags) \
{ \
	return (func##_impl(path, stat, request_mask, query_flags)); \
}
#else
#error "Unsupported kernel: unknown getattr() interface"
#endif
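
/*
 * Illustrative sketch (not part of the original header): the wrapper is
 * invoked once per getattr entry point; the filesystem supplies a
 * func##_impl() taking the 4.11-style arguments and the macro emits the
 * signature the running kernel expects.  The zpl_example_getattr name is
 * hypothetical.
 */
#if 0
static int
zpl_example_getattr_impl(const struct path *path, struct kstat *stat,
    u32 request_mask, unsigned int query_flags)
{
	/* ... fill in *stat for path->dentry->d_inode ... */
	return (0);
}
ZPL_GETATTR_WRAPPER(zpl_example_getattr);
#endif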

/*
 * 4.9 API change
 * Preferred interface to get the current FS time.
 */
#if !defined(HAVE_CURRENT_TIME)
static inline struct timespec
current_time(struct inode *ip)
{
	return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
}
#endif
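
/*
 * Illustrative sketch (not part of the original header): with the fallback
 * above, timestamp updates can use current_time() uniformly across kernel
 * versions.  zpl_example_touch is a hypothetical helper.
 */
#if 0
static void
zpl_example_touch(struct inode *ip)
{
	ip->i_mtime = ip->i_ctime = current_time(ip);
}
#endif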

/*
 * 4.16 API change
 * Added iversion interface for managing inode version field.
 */
#ifdef HAVE_INODE_SET_IVERSION
#include <linux/iversion.h>
#else
static inline void
inode_set_iversion(struct inode *ip, u64 val)
{
	ip->i_version = val;
}
#endif

#endif /* _ZFS_VFS_H */