/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */


#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>

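/*
 * Allocate a new inode for the given super block.  The inode is backed
 * by a znode created in zfs_inode_alloc(), which is expected to succeed,
 * hence the VERIFY.
 */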
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
	struct inode *ip;

	VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
	ip->i_version = 1;

	return (ip);
}

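/*
 * Destroy an inode once the VFS is completely done with it.  By this
 * point there must be no remaining holds, which the ASSERT verifies,
 * and the backing znode can be safely released.
 */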
static void
zpl_inode_destroy(struct inode *ip)
{
	ASSERT(atomic_read(&ip->i_count) == 0);
	zfs_inode_destroy(ip);
}

/*
 * Called from __mark_inode_dirty() to reflect that something in the
 * inode has changed.  We use it to ensure the znode system attributes
 * are always strictly up to date with respect to the inode.
 */
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
	zfs_dirty_inode(ip, flags);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
	zfs_dirty_inode(ip, 0);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */

/*
 * When ->drop_inode() is called its return value indicates if the
 * inode should be evicted from the inode cache.  If the inode is
 * unhashed and has no links the default policy is to evict it
 * immediately.
 *
 * Prior to 2.6.36 this eviction was accomplished by the vfs calling
 * ->delete_inode().  It was ->delete_inode()'s responsibility to
 * truncate the inode pages and call clear_inode().  The call to
 * clear_inode() synchronously invalidates all the buffers and
 * calls ->clear_inode().  It was ->clear_inode()'s responsibility
 * to clean up any filesystem specific data before freeing the inode.
 *
 * This elaborate mechanism was replaced by ->evict_inode() which
 * does the job of both ->delete_inode() and ->clear_inode().  It
 * will be called exactly once, and when it returns the inode must
 * be in a state where it can simply be freed.
 *
 * The ->evict_inode() callback must minimally truncate the inode pages,
 * and call clear_inode().  For 2.6.35 and later kernels this will
 * simply update the inode state, with the sync occurring before the
 * truncate in evict().  For earlier kernels clear_inode() maps to
 * end_writeback() which is responsible for completing all outstanding
 * write back.  In either case, once this is done it is safe to clean up
 * any remaining inode specific data via zfs_inactive().
 */
#ifdef HAVE_EVICT_INODE
static void
zpl_evict_inode(struct inode *ip)
{
	truncate_setsize(ip, 0);
	clear_inode(ip);
	zfs_inactive(ip);
}

#else

static void
zpl_clear_inode(struct inode *ip)
{
	zfs_inactive(ip);
}

static void
zpl_inode_delete(struct inode *ip)
{
	truncate_setsize(ip, 0);
	clear_inode(ip);
}

#endif /* HAVE_EVICT_INODE */

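/*
 * Release the super block at unmount time.  Note the zfs layer returns
 * positive errno values, so the result is negated to match the Linux
 * kernel convention; ->put_super() itself cannot report failure.
 */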
static void
zpl_put_super(struct super_block *sb)
{
	int error;

	error = -zfs_umount(sb);
	ASSERT3S(error, <=, 0);
}

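/*
 * Write out all dirty data for the file system.  A hold is taken on
 * the current credentials so they remain valid for the duration of
 * zfs_sync(); 'wait' indicates whether the sync must be synchronous.
 */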
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	error = -zfs_sync(sb, wait, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

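/*
 * Fill in the kstatfs structure for statfs(2) using zfs_statvfs(),
 * which reports the file system's block and file counts.
 */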
static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
	int error;

	error = -zfs_statvfs(dentry, statp);
	ASSERT3S(error, <=, 0);

	return (error);
}

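/*
 * Apply updated mount options when the file system is remounted
 * with 'mount -o remount'.
 */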
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
	int error;

	error = -zfs_remount(sb, flags, data);
	ASSERT3S(error, <=, 0);

	return (error);
}

static void
zpl_umount_begin(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int count;

	/*
	 * Best effort to unmount snapshots in .zfs/snapshot/.  Normally this
	 * isn't required because snapshots have the MNT_SHRINKABLE flag set.
	 */
	if (zsb->z_ctldir)
		(void) zfsctl_unmount_snapshots(zsb, MNT_FORCE, &count);
}

/*
 * ZFS specific features must be explicitly handled here; the VFS will
 * automatically handle the following generic functionality.
 *
 *	MNT_NOSUID,
 *	MNT_NODEV,
 *	MNT_NOEXEC,
 *	MNT_NOATIME,
 *	MNT_NODIRATIME,
 *	MNT_READONLY,
 *	MNT_STRICTATIME,
 *	MS_SYNCHRONOUS,
 *	MS_DIRSYNC,
 *	MS_MANDLOCK.
 */
static int
__zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb)
{
	seq_printf(seq, ",%s", zsb->z_flags & ZSB_XATTR ? "xattr" : "noxattr");

#ifdef CONFIG_FS_POSIX_ACL
	switch (zsb->z_acl_type) {
	case ZFS_ACLTYPE_POSIXACL:
		seq_puts(seq, ",posixacl");
		break;
	default:
		seq_puts(seq, ",noacl");
		break;
	}
#endif /* CONFIG_FS_POSIX_ACL */

	return (0);
}

#ifdef HAVE_SHOW_OPTIONS_WITH_DENTRY
static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}
#else
static int
zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp)
{
	return (__zpl_show_options(seq, vfsp->mnt_sb->s_fs_info));
}
#endif /* HAVE_SHOW_OPTIONS_WITH_DENTRY */

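/*
 * Populate a newly allocated super block.  The dataset name and mount
 * options are passed in via 'data' and handed off to zfs_domount().
 */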
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
	int error;

	error = -zfs_domount(sb, data, silent);
	ASSERT3S(error, <=, 0);

	return (error);
}

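/*
 * In 2.6.39 and later kernels ->mount() replaced ->get_sb() as the
 * file system entry point, returning the root dentry directly.  Both
 * variants simply bundle the dataset name and options into a
 * zpl_mount_data_t and defer the real work to zpl_fill_super().
 */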
#ifdef HAVE_MOUNT_NODEV
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
	zpl_mount_data_t zmd = { osname, data };

	return (mount_nodev(fs_type, flags, &zmd, zpl_fill_super));
}
#else
static int
zpl_get_sb(struct file_system_type *fs_type, int flags,
    const char *osname, void *data, struct vfsmount *mnt)
{
	zpl_mount_data_t zmd = { osname, data };

	return (get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt));
}
#endif /* HAVE_MOUNT_NODEV */

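/*
 * Tear down the super block.  zfs_preumount() must run before
 * kill_anon_super() so the .zfs control directory can be cleaned up
 * while the super block is still fully valid.
 */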
static void
zpl_kill_sb(struct super_block *sb)
{
	zfs_preumount(sb);
	kill_anon_super(sb);

#ifdef HAVE_S_INSTANCES_LIST_HEAD
	sb->s_instances.next = &(zpl_fs_type.fs_supers);
#endif /* HAVE_S_INSTANCES_LIST_HEAD */
}

#ifdef HAVE_SHRINK
/*
 * Linux 3.1 - 3.x API
 *
 * The Linux 3.1 API introduced per-sb cache shrinkers to replace the
 * global ones.  This gives us a mechanism to cleanly target a specific
 * zfs file system when the dnode and inode caches grow too large.
 *
 * In addition, the 3.0 kernel added the iterate_supers_type() helper
 * function which is used to safely walk all of the zfs file systems.
 */
static void
zpl_prune_sb(struct super_block *sb, void *arg)
{
	int objects = 0;
	int error;

	error = -zfs_sb_prune(sb, *(unsigned long *)arg, &objects);
	ASSERT3S(error, <=, 0);
}

void
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
{
	unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t));

	iterate_supers_type(&zpl_fs_type, zpl_prune_sb, &nr_to_scan);
	kmem_reap();
}
#else
/*
 * Linux 2.6.x - 3.0 API
 *
 * These best effort interfaces are provided by the SPL to induce
 * the Linux VM subsystem to reclaim a fraction of both the dnode and
 * inode caches.  Ideally, we want to target just the zfs file systems;
 * however, our only option is to reclaim from them all.
 */
void
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
{
	unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t));

	shrink_dcache_memory(nr_to_scan, GFP_KERNEL);
	shrink_icache_memory(nr_to_scan, GFP_KERNEL);
	kmem_reap();
}
#endif /* HAVE_SHRINK */

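/*
 * Report the number of cached znodes so the VFS can size its per-sb
 * shrinker passes accordingly.
 */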
#ifdef HAVE_NR_CACHED_OBJECTS
static int
zpl_nr_cached_objects(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int nr;

	mutex_enter(&zsb->z_znodes_lock);
	nr = zsb->z_nr_znodes;
	mutex_exit(&zsb->z_znodes_lock);

	return (nr);
}
#endif /* HAVE_NR_CACHED_OBJECTS */

#ifdef HAVE_FREE_CACHED_OBJECTS
/*
 * Attempt to evict some metadata from the cache.  The ARC operates in
 * terms of bytes while the Linux VFS uses objects.  Because this is
 * just a best effort eviction and the exact values aren't critical, we
 * extrapolate from an object count to a byte size using the znode_t size.
 */
static void
zpl_free_cached_objects(struct super_block *sb, int nr_to_scan)
{
	arc_adjust_meta(nr_to_scan * sizeof (znode_t), B_FALSE);
}
#endif /* HAVE_FREE_CACHED_OBJECTS */

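/*
 * Super block operations exported to the VFS.  Callbacks which are
 * unsupported, or handled entirely by the generic code, are explicitly
 * set to NULL.
 */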
const struct super_operations zpl_super_operations = {
	.alloc_inode		= zpl_inode_alloc,
	.destroy_inode		= zpl_inode_destroy,
	.dirty_inode		= zpl_dirty_inode,
	.write_inode		= NULL,
	.drop_inode		= NULL,
#ifdef HAVE_EVICT_INODE
	.evict_inode		= zpl_evict_inode,
#else
	.clear_inode		= zpl_clear_inode,
	.delete_inode		= zpl_inode_delete,
#endif /* HAVE_EVICT_INODE */
	.put_super		= zpl_put_super,
	.sync_fs		= zpl_sync_fs,
	.statfs			= zpl_statfs,
	.remount_fs		= zpl_remount_fs,
	.umount_begin		= zpl_umount_begin,
	.show_options		= zpl_show_options,
	.show_stats		= NULL,
#ifdef HAVE_NR_CACHED_OBJECTS
	.nr_cached_objects	= zpl_nr_cached_objects,
#endif /* HAVE_NR_CACHED_OBJECTS */
#ifdef HAVE_FREE_CACHED_OBJECTS
	.free_cached_objects	= zpl_free_cached_objects,
#endif /* HAVE_FREE_CACHED_OBJECTS */
};

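/*
 * The file system type registered with the kernel when the module
 * is loaded.
 */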
struct file_system_type zpl_fs_type = {
	.owner			= THIS_MODULE,
	.name			= ZFS_DRIVER,
#ifdef HAVE_MOUNT_NODEV
	.mount			= zpl_mount,
#else
	.get_sb			= zpl_get_sb,
#endif /* HAVE_MOUNT_NODEV */
	.kill_sb		= zpl_kill_sb,
};