/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2015 Jörg Thalheim.
 */

#ifndef _ZFS_VFS_H
#define _ZFS_VFS_H

#include <sys/taskq.h>
#include <linux/backing-dev.h>

/*
 * 2.6.28 API change,
 * Added the insert_inode_locked() helper function; prior to this most
 * callers used insert_inode_hash(). The older method doesn't check for
 * collisions in the inode_hashtable, but it is still acceptable for use.
 */
#ifndef HAVE_INSERT_INODE_LOCKED
static inline int
insert_inode_locked(struct inode *ip)
{
        insert_inode_hash(ip);
        return (0);
}
#endif /* HAVE_INSERT_INODE_LOCKED */

/*
 * 2.6.35 API change,
 * Add truncate_setsize() if it is not exported by the Linux kernel.
 *
 * Truncate the inode and pages associated with the inode. The pages are
 * unmapped and removed from cache.
 */
#ifndef HAVE_TRUNCATE_SETSIZE
static inline void
truncate_setsize(struct inode *ip, loff_t new)
{
        struct address_space *mapping = ip->i_mapping;

        i_size_write(ip, new);

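        /*
         * Like the kernel's truncate_pagecache(), unmap both before and
         * after truncating the page cache so mappings faulted or COWed in
         * while truncate_inode_pages() runs are also removed.
         */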
        unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(mapping, new);
        unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
#endif /* HAVE_TRUNCATE_SETSIZE */

/*
 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
 * 4.12 - x.y, super_setup_bdi_name() new interface.
 */
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
extern atomic_long_t zfs_bdi_seq;

static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
        return super_setup_bdi_name(sb, "%.28s-%ld", name,
            atomic_long_inc_return(&zfs_bdi_seq));
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
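        /*
         * Nothing to do here: the bdi created by super_setup_bdi_name()
         * is owned by the VFS and released when the superblock is torn
         * down.
         */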
}
#elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
        struct backing_dev_info *bdi;
        int error;

        bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
        error = bdi_setup_and_register(bdi, name);
        if (error) {
                kmem_free(bdi, sizeof (struct backing_dev_info));
                return (error);
        }

        sb->s_bdi = bdi;

        return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
        struct backing_dev_info *bdi = sb->s_bdi;

        bdi_destroy(bdi);
        kmem_free(bdi, sizeof (struct backing_dev_info));
        sb->s_bdi = NULL;
}
#elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
        struct backing_dev_info *bdi;
        int error;

        bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
        error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
        if (error) {
                kmem_free(bdi, sizeof (struct backing_dev_info));
                return (error);
        }

        sb->s_bdi = bdi;

        return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
        struct backing_dev_info *bdi = sb->s_bdi;

        bdi_destroy(bdi);
        kmem_free(bdi, sizeof (struct backing_dev_info));
        sb->s_bdi = NULL;
}
#else
extern atomic_long_t zfs_bdi_seq;

static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
        struct backing_dev_info *bdi;
        int error;

        bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
        bdi->name = name;
        bdi->capabilities = BDI_CAP_MAP_COPY;

        error = bdi_init(bdi);
        if (error) {
                kmem_free(bdi, sizeof (struct backing_dev_info));
                return (error);
        }

        error = bdi_register(bdi, NULL, "%.28s-%ld", name,
            atomic_long_inc_return(&zfs_bdi_seq));
        if (error) {
                bdi_destroy(bdi);
                kmem_free(bdi, sizeof (struct backing_dev_info));
                return (error);
        }

        sb->s_bdi = bdi;

        return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
        struct backing_dev_info *bdi = sb->s_bdi;

        bdi_destroy(bdi);
        kmem_free(bdi, sizeof (struct backing_dev_info));
        sb->s_bdi = NULL;
}
#endif
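
/*
 * Example (illustrative only; the "zfs" name shown below is an example
 * argument, not required by this header): a filesystem would typically
 * call zpl_bdi_setup(sb, "zfs") while setting up its super_block and pair
 * it with zpl_bdi_destroy(sb) on teardown.
 */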

/*
 * 2.6.38 API change,
 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases.
 */
#ifndef LOOKUP_RCU
#define LOOKUP_RCU 0x0
#endif /* LOOKUP_RCU */

/*
 * 3.2-rc1 API change,
 * Add set_nlink() if it is not exported by the Linux kernel.
 *
 * i_nlink is read-only in Linux 3.2, but it can be set directly in
 * earlier kernels.
 */
#ifndef HAVE_SET_NLINK
static inline void
set_nlink(struct inode *inode, unsigned int nlink)
{
        inode->i_nlink = nlink;
}
#endif /* HAVE_SET_NLINK */

/*
 * 3.3 API change,
 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
 * umode_t type rather than an int. To cleanly handle both definitions
 * the zpl_umode_t type is introduced and set accordingly.
 */
#ifdef HAVE_MKDIR_UMODE_T
typedef umode_t zpl_umode_t;
#else
typedef int zpl_umode_t;
#endif

/*
 * 3.5 API change,
 * The clear_inode() function replaces end_writeback() and introduces an
 * ordering change regarding when the inode_sync_wait() occurs. See the
 * configure check in config/kernel-clear-inode.m4 for full details.
 */
#if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
#define clear_inode(ip) end_writeback(ip)
#endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */

/*
 * 3.6 API change,
 * The sget() helper function now takes the mount flags as an argument.
 */
#ifdef HAVE_5ARG_SGET
#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd)
#else
#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd)
#endif /* HAVE_5ARG_SGET */
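
/*
 * Example (illustrative only; the callback and mount-data names below are
 * hypothetical):
 *
 *      s = zpl_sget(fs_type, test_super_cb, set_super_cb, flags, mount_data);
 *
 * On kernels without the five-argument sget() the fl argument is simply
 * dropped by the macro above.
 */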

#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
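/*
 * Fallback for kernels that do not provide lseek_execute(): validate the
 * requested offset against the file's limits and, if it changed, commit
 * it to f_pos (and reset f_version) under f_lock. Used by the
 * SEEK_HOLE/SEEK_DATA llseek paths.
 */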
static inline loff_t
lseek_execute(
        struct file *filp,
        struct inode *inode,
        loff_t offset,
        loff_t maxsize)
{
        if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
                return (-EINVAL);

        if (offset > maxsize)
                return (-EINVAL);

        if (offset != filp->f_pos) {
                spin_lock(&filp->f_lock);
                filp->f_pos = offset;
                filp->f_version = 0;
                spin_unlock(&filp->f_lock);
        }

        return (offset);
}
#endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */

#if defined(CONFIG_FS_POSIX_ACL)
/*
 * These functions safely approximate the behavior of posix_acl_release(),
 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
 * The in-kernel version, which can access the RCU machinery, frees the
 * ACLs after the grace period expires. Because we're unsure how long that
 * grace period may be, this implementation conservatively delays for 60
 * seconds. This is several orders of magnitude larger than the expected
 * grace period. At 60 seconds the kernel will also begin issuing RCU
 * stall warnings.
 */
#include <linux/posix_acl.h>

#if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
#define zpl_posix_acl_release(arg) posix_acl_release(arg)
#else
void zpl_posix_acl_release_impl(struct posix_acl *);

static inline void
zpl_posix_acl_release(struct posix_acl *acl)
{
        if ((acl == NULL) || (acl == ACL_NOT_CACHED))
                return;

        if (atomic_dec_and_test(&acl->a_refcount))
                zpl_posix_acl_release_impl(acl);
}
#endif /* HAVE_POSIX_ACL_RELEASE */

#ifdef HAVE_SET_CACHED_ACL_USABLE
#define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
#define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
#else
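/*
 * Open-coded replacements for set_cached_acl()/forget_cached_acl(): take a
 * reference on the new ACL, swap it into the inode's ACL cache under
 * i_lock, then drop the previously cached ACL (if any) through
 * zpl_posix_acl_release().
 */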
static inline void
zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer) {
        struct posix_acl *older = NULL;

        spin_lock(&ip->i_lock);

        if ((newer != ACL_NOT_CACHED) && (newer != NULL))
                posix_acl_dup(newer);

        switch (type) {
        case ACL_TYPE_ACCESS:
                older = ip->i_acl;
                rcu_assign_pointer(ip->i_acl, newer);
                break;
        case ACL_TYPE_DEFAULT:
                older = ip->i_default_acl;
                rcu_assign_pointer(ip->i_default_acl, newer);
                break;
        }

        spin_unlock(&ip->i_lock);

        zpl_posix_acl_release(older);
}

static inline void
zpl_forget_cached_acl(struct inode *ip, int type) {
        zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
}
#endif /* HAVE_SET_CACHED_ACL_USABLE */

#ifndef HAVE___POSIX_ACL_CHMOD
#ifdef HAVE_POSIX_ACL_CHMOD
#define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
#define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
#else
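/*
 * Open-coded equivalents for kernels that provide neither
 * __posix_acl_chmod()/__posix_acl_create() nor the older
 * posix_acl_chmod()/posix_acl_create() helpers: clone the ACL, release
 * the original, and apply posix_acl_chmod_masq() or
 * posix_acl_create_masq() to the copy.
 */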
static inline int
__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode) {
        struct posix_acl *oldacl = *acl;
        mode_t mode = umode;
        int error;

        *acl = posix_acl_clone(*acl, flags);
        zpl_posix_acl_release(oldacl);

        if (!(*acl))
                return (-ENOMEM);

        error = posix_acl_chmod_masq(*acl, mode);
        if (error) {
                zpl_posix_acl_release(*acl);
                *acl = NULL;
        }

        return (error);
}

static inline int
__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep) {
        struct posix_acl *oldacl = *acl;
        mode_t mode = *umodep;
        int error;

        *acl = posix_acl_clone(*acl, flags);
        zpl_posix_acl_release(oldacl);

        if (!(*acl))
                return (-ENOMEM);

        error = posix_acl_create_masq(*acl, &mode);
        *umodep = mode;

        if (error < 0) {
                zpl_posix_acl_release(*acl);
                *acl = NULL;
        }

        return (error);
}
#endif /* HAVE_POSIX_ACL_CHMOD */
#endif /* HAVE___POSIX_ACL_CHMOD */

#ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
typedef umode_t zpl_equivmode_t;
#else
typedef mode_t zpl_equivmode_t;
#endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */

/*
 * 4.8 API change,
 * posix_acl_valid() now must be passed a namespace; the namespace from the
 * super block associated with the given inode is used for this purpose.
 */
#ifdef HAVE_POSIX_ACL_VALID_WITH_NS
#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
#else
#define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
#endif

#endif /* CONFIG_FS_POSIX_ACL */

/*
 * 2.6.38 API change,
 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
 */
#ifdef HAVE_INODE_OWNER_OR_CAPABLE
#define zpl_inode_owner_or_capable(ip) inode_owner_or_capable(ip)
#else
#define zpl_inode_owner_or_capable(ip) is_owner_or_cap(ip)
#endif /* HAVE_INODE_OWNER_OR_CAPABLE */

/*
 * 3.19 API change
 * Direct access via f->f_dentry->d_inode was replaced by the accessor
 * function file_inode(f).
 */
#ifndef HAVE_FILE_INODE
static inline struct inode *file_inode(const struct file *f)
{
        return (f->f_dentry->d_inode);
}
#endif /* HAVE_FILE_INODE */

/*
 * 2.6.38 API change,
 * The follow_down() helper was renamed follow_down_one().
 */
#ifdef HAVE_FOLLOW_DOWN_ONE
#define zpl_follow_down_one(path) follow_down_one(path)
#define zpl_follow_up(path) follow_up(path)
#else
#define zpl_follow_down_one(path) follow_down(path)
#define zpl_follow_up(path) follow_up(path)
#endif

/*
 * 4.9 API change,
 * The inode_change_ok() function was renamed setattr_prepare() and now
 * takes a dentry rather than an inode.
 */
#ifndef HAVE_SETATTR_PREPARE
static inline int
setattr_prepare(struct dentry *dentry, struct iattr *ia)
{
        return (inode_change_ok(dentry->d_inode, ia));
}
#endif

/*
 * 4.11 API change
 * These macros are defined by kernel 4.11. We define them so that the same
 * code builds under kernels < 4.11 and >= 4.11. The macros are set to 0 so
 * that they create obvious failures if these placeholder values are ever
 * used when built against a kernel >= 4.11.
 */

#ifndef STATX_BASIC_STATS
#define STATX_BASIC_STATS 0
#endif

#ifndef AT_STATX_SYNC_AS_STAT
#define AT_STATX_SYNC_AS_STAT 0
#endif

/*
 * 4.11 API change
 * The inode_operations ->getattr() callback takes a struct path * in 4.11
 * and later; earlier kernels pass a vfsmount * and dentry.
 */

#ifdef HAVE_VFSMOUNT_IOPS_GETATTR
#define ZPL_GETATTR_WRAPPER(func)                                       \
static int                                                              \
func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)  \
{                                                                       \
        struct path path = { .mnt = mnt, .dentry = dentry };            \
        return func##_impl(&path, stat, STATX_BASIC_STATS,              \
            AT_STATX_SYNC_AS_STAT);                                     \
}
#elif defined(HAVE_PATH_IOPS_GETATTR)
#define ZPL_GETATTR_WRAPPER(func)                                       \
static int                                                              \
func(const struct path *path, struct kstat *stat, u32 request_mask,    \
    unsigned int query_flags)                                           \
{                                                                       \
        return (func##_impl(path, stat, request_mask, query_flags));    \
}
#else
#error
#endif
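
/*
 * Example (illustrative only): given an implementation such as a
 * hypothetical zpl_getattr_impl() written against the 4.11-style
 * arguments (const struct path *, struct kstat *, u32, unsigned int),
 * ZPL_GETATTR_WRAPPER(zpl_getattr) emits a zpl_getattr() with whichever
 * ->getattr() signature the running kernel expects.
 */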

/*
 * 4.9 API change
 * Preferred interface to get the current FS time.
 */
#if !defined(HAVE_CURRENT_TIME)
static inline struct timespec
current_time(struct inode *ip)
{
        return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
}
#endif

#endif /* _ZFS_VFS_H */