4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
24 * Copyright (C) 2015 Jörg Thalheim.
30 #include <sys/taskq.h>
31 #include <linux/backing-dev.h>
35 * Added insert_inode_locked() helper function, prior to this most callers
36 * used insert_inode_hash(). The older method doesn't check for collisions
37 * in the inode_hashtable but it is still acceptable for use.
39 #ifndef HAVE_INSERT_INODE_LOCKED
/*
 * Compat shim: provide insert_inode_locked() on kernels that lack it by
 * falling back to insert_inode_hash(), which skips the inode_hashtable
 * collision check performed by the real helper (see comment above).
 * NOTE(review): the return-type line, braces, and return statement are
 * not visible in this chunk -- original lines appear to be missing.
 */
41 insert_inode_locked(struct inode
*ip
)
43 insert_inode_hash(ip
);
46 #endif /* HAVE_INSERT_INODE_LOCKED */
50 * Add truncate_setsize() if it is not exported by the Linux kernel.
52 * Truncate the inode and pages associated with the inode. The pages are
53 * unmapped and removed from cache.
55 #ifndef HAVE_TRUNCATE_SETSIZE
/*
 * Compat shim for kernels without truncate_setsize(): publish the new
 * inode size, then unmap and drop all page-cache pages past it.
 * NOTE(review): return-type line and braces are not visible here.
 */
57 truncate_setsize(struct inode
*ip
, loff_t
new)
59 struct address_space
*mapping
= ip
->i_mapping
;
/* Update i_size first so readers see the new size before teardown. */
61 i_size_write(ip
, new);
/* Unmap everything from the last partial page onward. */
63 unmap_mapping_range(mapping
, new + PAGE_SIZE
- 1, 0, 1);
64 truncate_inode_pages(mapping
, new);
/*
 * Second, identical unmap pass -- presumably to catch pages faulted
 * back in while truncate_inode_pages() ran; TODO confirm rationale.
 */
65 unmap_mapping_range(mapping
, new + PAGE_SIZE
- 1, 0, 1);
67 #endif /* HAVE_TRUNCATE_SETSIZE */
70 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
71 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
72 * 4.0 - x.y, bdi_setup_and_register() takes 2 arguments.
74 #if defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
/* 4.0 and later: kernel helper takes (bdi, name) -- see comment above. */
76 zpl_bdi_setup_and_register(struct backing_dev_info
*bdi
, char *name
)
78 return (bdi_setup_and_register(bdi
, name
));
80 #elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
/* 2.6.34 - 3.19: kernel helper also takes a capabilities argument. */
82 zpl_bdi_setup_and_register(struct backing_dev_info
*bdi
, char *name
)
84 return (bdi_setup_and_register(bdi
, name
, BDI_CAP_MAP_COPY
));
/*
 * Oldest kernels: no helper exists, so init and register the bdi by
 * hand; zfs_bdi_seq supplies a unique numeric suffix for the name.
 * NOTE(review): local declarations ('tmp', 'error'), error handling,
 * and braces are not visible in this chunk -- lines appear missing.
 */
87 extern atomic_long_t zfs_bdi_seq
;
90 zpl_bdi_setup_and_register(struct backing_dev_info
*bdi
, char *name
)
96 bdi
->capabilities
= BDI_CAP_MAP_COPY
;
98 error
= bdi_init(bdi
);
/*
 * Build the format string "name-%d" (name truncated to 28 chars); the
 * %d is filled by bdi_register() from the sequence counter below.
 */
102 sprintf(tmp
, "%.28s%s", name
, "-%d");
103 error
= bdi_register(bdi
, NULL
, tmp
,
104 atomic_long_inc_return(&zfs_bdi_seq
));
116 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases.
/*
 * Defined as 0 on kernels that lack it, so 'flags & LOOKUP_RCU' tests
 * compile and always evaluate false.  NOTE(review): the matching
 * '#ifndef LOOKUP_RCU' line is not visible in this chunk.
 */
119 #define LOOKUP_RCU 0x0
120 #endif /* LOOKUP_RCU */
123 * 3.2-rc1 API change,
124 * Add set_nlink() if it is not exported by the Linux kernel.
126 * i_nlink is read-only in Linux 3.2, but it can be set directly in
129 #ifndef HAVE_SET_NLINK
/*
 * Compat: open-coded set_nlink() for kernels where i_nlink is still a
 * directly writable field (pre-3.2, per the comment above).
 * NOTE(review): return-type line and braces are not visible here.
 */
131 set_nlink(struct inode
*inode
, unsigned int nlink
)
133 inode
->i_nlink
= nlink
;
135 #endif /* HAVE_SET_NLINK */
139 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
140 * umode_t type rather than an int. To cleanly handle both definitions
141 * the zpl_umode_t type is introduced and set accordingly.
143 #ifdef HAVE_MKDIR_UMODE_T
/* Mode type for the .create/.mkdir/.mknod callbacks (see comment above). */
144 typedef umode_t zpl_umode_t
;
/* Older kernels passed the mode as a plain int. */
146 typedef int zpl_umode_t
;
151 * The clear_inode() function replaces end_writeback() and introduces an
152 * ordering change regarding when the inode_sync_wait() occurs. See the
153 * configure check in config/kernel-clear-inode.m4 for full details.
155 #if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
/* clear_inode() replaced end_writeback(); alias it on in-between kernels. */
156 #define clear_inode(ip) end_writeback(ip)
157 #endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */
161 * The sget() helper function now takes the mount flags as an argument.
163 #ifdef HAVE_5ARG_SGET
164 #define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd)
/* Older 4-argument sget(): the mount-flags argument 'fl' is dropped. */
166 #define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd)
167 #endif /* HAVE_5ARG_SGET */
169 #if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
/*
 * Fallback lseek_execute(): validate a proposed file offset and commit
 * it to filp->f_pos under f_lock.
 * NOTE(review): the function signature, return statements, and some
 * body lines are not visible in this chunk -- lines appear missing.
 */
/* Reject negative offsets unless the file permits unsigned offsets. */
177 if (offset
< 0 && !(filp
->f_mode
& FMODE_UNSIGNED_OFFSET
))
/* Reject offsets beyond the caller-supplied maximum. */
180 if (offset
> maxsize
)
/* Only take f_lock when the position actually changes. */
183 if (offset
!= filp
->f_pos
) {
184 spin_lock(&filp
->f_lock
);
185 filp
->f_pos
= offset
;
187 spin_unlock(&filp
->f_lock
);
192 #endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
194 #if defined(CONFIG_FS_POSIX_ACL)
196 * These functions safely approximate the behavior of posix_acl_release()
197 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
198 * The in-kernel version, which can access the RCU, frees the ACLs after
199 * the grace period expires. Because we're unsure how long that grace
200 * period may be this implementation conservatively delays for 60 seconds.
201 * This is several orders of magnitude larger than expected grace period.
202 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
204 #include <linux/posix_acl.h>
206 #if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
/* Exported and not GPL-only: use the kernel's posix_acl_release() as-is. */
207 #define zpl_posix_acl_release(arg) posix_acl_release(arg)
/*
 * Otherwise open-code the release: drop one reference and, when the
 * count reaches zero, hand the ACL to zpl_posix_acl_release_impl()
 * (defined elsewhere), which per the comment above delays the free in
 * lieu of a real RCU grace period.
 */
209 void zpl_posix_acl_release_impl(struct posix_acl
*);
212 zpl_posix_acl_release(struct posix_acl
*acl
)
/* NULL and the ACL_NOT_CACHED sentinel carry no reference to drop. */
214 if ((acl
== NULL
) || (acl
== ACL_NOT_CACHED
))
217 if (atomic_dec_and_test(&acl
->a_refcount
))
218 zpl_posix_acl_release_impl(acl
);
220 #endif /* HAVE_POSIX_ACL_RELEASE */
222 #ifdef HAVE_SET_CACHED_ACL_USABLE
/* Kernel helpers are usable directly: map straight through. */
223 #define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
224 #define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
/*
 * Open-coded replacement: swap the cached ACL pointer under i_lock and
 * release the displaced one via the GPL-safe zpl_posix_acl_release().
 * A reference is taken on 'newer' unless it is NULL or ACL_NOT_CACHED.
 * NOTE(review): the switch header, 'older' assignment for the
 * ACL_TYPE_ACCESS case, break/default lines, and braces are not
 * visible in this chunk -- original lines appear to be missing.
 */
227 zpl_set_cached_acl(struct inode
*ip
, int type
, struct posix_acl
*newer
) {
228 struct posix_acl
*older
= NULL
;
230 spin_lock(&ip
->i_lock
);
/* Sentinel and NULL are stored as-is without taking a reference. */
232 if ((newer
!= ACL_NOT_CACHED
) && (newer
!= NULL
))
233 posix_acl_dup(newer
);
236 case ACL_TYPE_ACCESS
:
238 rcu_assign_pointer(ip
->i_acl
, newer
);
240 case ACL_TYPE_DEFAULT
:
241 older
= ip
->i_default_acl
;
242 rcu_assign_pointer(ip
->i_default_acl
, newer
);
246 spin_unlock(&ip
->i_lock
);
/* Drop the reference held by the previously cached ACL. */
248 zpl_posix_acl_release(older
);
/*
 * Drop any cached ACL of the given type by storing the ACL_NOT_CACHED
 * sentinel; zpl_set_cached_acl() releases the displaced ACL.
 */
252 zpl_forget_cached_acl(struct inode
*ip
, int type
) {
253 zpl_set_cached_acl(ip
, type
, (struct posix_acl
*)ACL_NOT_CACHED
);
255 #endif /* HAVE_SET_CACHED_ACL_USABLE */
257 #ifndef HAVE___POSIX_ACL_CHMOD
258 #ifdef HAVE_POSIX_ACL_CHMOD
/* Kernel exports only the non-underscore names: map straight through. */
259 #define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
260 #define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
/*
 * Neither form exists: emulate __posix_acl_chmod() by cloning the ACL,
 * dropping the original reference, and applying posix_acl_chmod_masq()
 * to the clone; the clone is released on masq failure.
 * NOTE(review): declarations of 'error'/'mode', the NULL-clone check,
 * and return paths are not visible in this chunk -- lines appear
 * to be missing.
 */
263 __posix_acl_chmod(struct posix_acl
**acl
, int flags
, umode_t umode
) {
264 struct posix_acl
*oldacl
= *acl
;
268 *acl
= posix_acl_clone(*acl
, flags
);
269 zpl_posix_acl_release(oldacl
);
274 error
= posix_acl_chmod_masq(*acl
, mode
);
276 zpl_posix_acl_release(*acl
);
/*
 * Emulated __posix_acl_create(): clone the ACL, drop the original
 * reference, then apply posix_acl_create_masq() with the mode taken
 * from *umodep; the clone is released on masq failure.
 * NOTE(review): declaration of 'error', the NULL-clone check, the
 * write-back of 'mode' to *umodep, and return paths are not visible
 * in this chunk -- original lines appear to be missing.
 */
284 __posix_acl_create(struct posix_acl
**acl
, int flags
, umode_t
*umodep
) {
285 struct posix_acl
*oldacl
= *acl
;
286 mode_t mode
= *umodep
;
289 *acl
= posix_acl_clone(*acl
, flags
);
290 zpl_posix_acl_release(oldacl
);
295 error
= posix_acl_create_masq(*acl
, &mode
);
299 zpl_posix_acl_release(*acl
);
305 #endif /* HAVE_POSIX_ACL_CHMOD */
306 #endif /* HAVE___POSIX_ACL_CHMOD */
308 #ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
/* Mode type expected by posix_acl_equiv_mode() on this kernel. */
309 typedef umode_t zpl_equivmode_t
;
311 typedef mode_t zpl_equivmode_t
;
312 #endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */
316 * posix_acl_valid() now must be passed a namespace, the namespace from
317 * the super block associated with the given inode is used for this purpose.
319 #ifdef HAVE_POSIX_ACL_VALID_WITH_NS
/* Namespace-taking variant: pass the user namespace from the inode's sb. */
320 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
/* Older kernels validate the ACL alone; the inode argument is unused. */
322 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
325 #endif /* CONFIG_FS_POSIX_ACL */
329 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
331 #ifdef HAVE_INODE_OWNER_OR_CAPABLE
/* Map to whichever name this kernel uses (renamed; see comment above). */
332 #define zpl_inode_owner_or_capable(ip) inode_owner_or_capable(ip)
334 #define zpl_inode_owner_or_capable(ip) is_owner_or_cap(ip)
335 #endif /* HAVE_INODE_OWNER_OR_CAPABLE */
339 * struct access f->f_dentry->d_inode was replaced by accessor function
342 #ifndef HAVE_FILE_INODE
/*
 * Compat accessor: derive the inode via f->f_dentry on kernels that
 * predate the file_inode() helper.
 * NOTE(review): braces are not visible in this chunk.
 */
343 static inline struct inode
*file_inode(const struct file
*f
)
345 return (f
->f_dentry
->d_inode
);
347 #endif /* HAVE_FILE_INODE */
352 #ifdef HAVE_FOLLOW_DOWN_ONE
/* Newer kernels call it follow_down_one(); older ones, follow_down(). */
353 #define zpl_follow_down_one(path) follow_down_one(path)
354 #define zpl_follow_up(path) follow_up(path)
356 #define zpl_follow_down_one(path) follow_down(path)
357 #define zpl_follow_up(path) follow_up(path)
363 #ifndef HAVE_SETATTR_PREPARE
/*
 * Compat: setattr_prepare() forwards to the older inode_change_ok()
 * on kernels that lack the newer name.
 * NOTE(review): return-type line and braces are not visible here.
 */
365 setattr_prepare(struct dentry
*dentry
, struct iattr
*ia
)
367 return (inode_change_ok(dentry
->d_inode
, ia
));
373 * These macros are defined by kernel 4.11. We define them so that the same
374 * code builds under kernels < 4.11 and >= 4.11. The macros are set to 0 so
375 * that it will create obvious failures if they are accidentally used when built
376 * against a kernel >= 4.11.
379 #ifndef STATX_BASIC_STATS
/* Defined to 0 pre-4.11 so accidental use fails obviously (see above). */
380 #define STATX_BASIC_STATS 0
/* NOTE(review): matching #endif lines are not visible in this chunk. */
383 #ifndef AT_STATX_SYNC_AS_STAT
384 #define AT_STATX_SYNC_AS_STAT 0
389 * 4.11 takes struct path *, < 4.11 takes vfsmount *
392 #ifdef HAVE_VFSMOUNT_IOPS_GETATTR
/*
 * Pre-4.11 .getattr signature (vfsmount, dentry, kstat): build a
 * struct path and forward to the 4.11-style func##_impl().
 * NOTE(review): some macro-body lines (wrapper return-type line,
 * closing brace) are not visible in this chunk.
 */
393 #define ZPL_GETATTR_WRAPPER(func) \
395 func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) \
397 struct path path = { .mnt = mnt, .dentry = dentry }; \
398 return func##_impl(&path, stat, STATX_BASIC_STATS, \
399 AT_STATX_SYNC_AS_STAT); \
401 #elif defined(HAVE_PATH_IOPS_GETATTR)
/* 4.11+ signature: pass (path, stat, request_mask, query_flags) through. */
402 #define ZPL_GETATTR_WRAPPER(func) \
404 func(const struct path *path, struct kstat *stat, u32 request_mask, \
405 unsigned int query_flags) \
407 return (func##_impl(path, stat, request_mask, query_flags)); \
414 #endif /* _ZFS_VFS_H */