/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */

#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>

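/*
 * Allocate a znode with its embedded inode and initialize the inode's
 * version counter.
 */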
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
	struct inode *ip;

	VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
	ip->i_version = 1;

	return (ip);
}

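/*
 * Called once the last reference to the inode has been dropped; release
 * the backing znode.
 */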
static void
zpl_inode_destroy(struct inode *ip)
{
	ASSERT(atomic_read(&ip->i_count) == 0);
	zfs_inode_destroy(ip);
}

/*
 * Called from __mark_inode_dirty() to reflect that something in the
 * inode has changed.  We use it to ensure the znode system attributes
 * are always strictly up to date with respect to the inode.
 */
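/*
 * Note that this callback, like most of the super block callbacks below,
 * brackets its call into the common ZFS code with spl_fstrans_mark() and
 * spl_fstrans_unmark().  This keeps memory allocations performed while
 * servicing the VFS request from recursing back into the filesystem
 * during direct reclaim and deadlocking.
 */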
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, flags);
	spl_fstrans_unmark(cookie);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, 0);
	spl_fstrans_unmark(cookie);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */

/*
 * When ->drop_inode() is called its return value indicates whether the
 * inode should be evicted from the inode cache.  If the inode is
 * unhashed and has no links the default policy is to evict it
 * immediately.
 *
 * Prior to 2.6.36 this eviction was accomplished by the vfs calling
 * ->delete_inode().  It was ->delete_inode()'s responsibility to
 * truncate the inode pages and call clear_inode().  The call to
 * clear_inode() synchronously invalidates all the buffers and
 * calls ->clear_inode().  It was ->clear_inode()'s responsibility
 * to clean up any filesystem specific data before freeing the inode.
 *
 * This elaborate mechanism was replaced by ->evict_inode() which
 * does the job of both ->delete_inode() and ->clear_inode().  It
 * will be called exactly once, and when it returns the inode must
 * be in a state where it can simply be freed.
 *
 * The ->evict_inode() callback must minimally truncate the inode pages,
 * and call clear_inode().  For 2.6.35 and later kernels this will
 * simply update the inode state, with the sync occurring before the
 * truncate in evict().  For earlier kernels clear_inode() maps to
 * end_writeback() which is responsible for completing all outstanding
 * write back.  In either case, once this is done it is safe to clean up
 * any remaining inode and filesystem specific data via zfs_inactive().
 */
#ifdef HAVE_EVICT_INODE
static void
zpl_evict_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	truncate_setsize(ip, 0);
	clear_inode(ip);
	zfs_inactive(ip);
	spl_fstrans_unmark(cookie);
}

#else

static void
zpl_drop_inode(struct inode *ip)
{
	generic_delete_inode(ip);
}

static void
zpl_clear_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_inactive(ip);
	spl_fstrans_unmark(cookie);
}

static void
zpl_inode_delete(struct inode *ip)
{
	truncate_setsize(ip, 0);
	clear_inode(ip);
}
#endif /* HAVE_EVICT_INODE */

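/*
 * Called by the VFS when the super block is being torn down at unmount
 * time; unmount the underlying ZFS filesystem.
 */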
static void
zpl_put_super(struct super_block *sb)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_umount(sb);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);
}

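/*
 * Flush all dirty data for the filesystem; when 'wait' is set the sync
 * must complete before returning.
 */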
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
	fstrans_cookie_t cookie;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_sync(sb, wait, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

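/*
 * Report filesystem statistics (block and inode counts) for statfs(2).
 */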
static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_statvfs(dentry, statp);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

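/*
 * Handle 'mount -o remount' by applying the updated mount options to the
 * existing super block.
 */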
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_remount(sb, flags, data);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

/*
 * ZFS specific features must be explicitly handled here; the VFS will
 * automatically handle the following generic functionality.
 *
 *	MNT_NOSUID,
 *	MNT_NODEV,
 *	MNT_NOEXEC,
 *	MNT_NOATIME,
 *	MNT_NODIRATIME,
 *	MNT_READONLY,
 *	MNT_STRICTATIME,
 *	MS_SYNCHRONOUS,
 *	MS_DIRSYNC,
 *	MS_MANDLOCK.
 */
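/*
 * For example, a dataset mounted with xattr and POSIX ACL support enabled
 * would contribute ",xattr,posixacl" to its entry in /proc/mounts, with the
 * generic options (ro, noatime, ...) appended by the VFS itself.
 */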
static int
__zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb)
{
	seq_printf(seq, ",%s", zsb->z_flags & ZSB_XATTR ? "xattr" : "noxattr");

#ifdef CONFIG_FS_POSIX_ACL
	switch (zsb->z_acl_type) {
	case ZFS_ACLTYPE_POSIXACL:
		seq_puts(seq, ",posixacl");
		break;
	default:
		seq_puts(seq, ",noacl");
		break;
	}
#endif /* CONFIG_FS_POSIX_ACL */

	return (0);
}

#ifdef HAVE_SHOW_OPTIONS_WITH_DENTRY
static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}
#else
static int
zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp)
{
	return (__zpl_show_options(seq, vfsp->mnt_sb->s_fs_info));
}
#endif /* HAVE_SHOW_OPTIONS_WITH_DENTRY */

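/*
 * Fill in a newly allocated super block at mount time; zfs_domount() does
 * the real work of mounting the dataset.
 */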
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_domount(sb, data, silent);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

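/*
 * Mount entry point.  ZFS datasets are not backed by a block device node,
 * so the nodev helpers are used; the dataset name and option string are
 * passed through to zpl_fill_super() in a zpl_mount_data_t.
 */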
#ifdef HAVE_MOUNT_NODEV
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
	zpl_mount_data_t zmd = { osname, data };

	return (mount_nodev(fs_type, flags, &zmd, zpl_fill_super));
}
#else
static int
zpl_get_sb(struct file_system_type *fs_type, int flags,
    const char *osname, void *data, struct vfsmount *mnt)
{
	zpl_mount_data_t zmd = { osname, data };

	return (get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt));
}
#endif /* HAVE_MOUNT_NODEV */

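/*
 * Tear down the super block on unmount.  zfs_preumount() is given a chance
 * to release the .zfs control directory before kill_anon_super() drops the
 * generic super block state.
 */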
static void
zpl_kill_sb(struct super_block *sb)
{
	zfs_preumount(sb);
	kill_anon_super(sb);

#ifdef HAVE_S_INSTANCES_LIST_HEAD
	sb->s_instances.next = &(zpl_fs_type.fs_supers);
#endif /* HAVE_S_INSTANCES_LIST_HEAD */
}

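/*
 * Prune up to nr_to_scan objects from the dentry and inode caches of the
 * given super block; this is driven from the ARC when it needs to reclaim
 * metadata.
 */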
void
zpl_prune_sb(int64_t nr_to_scan, void *arg)
{
	struct super_block *sb = (struct super_block *)arg;
	int objects = 0;

	(void) -zfs_sb_prune(sb, nr_to_scan, &objects);
}

#ifdef HAVE_NR_CACHED_OBJECTS
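/*
 * Report the number of cached znodes so the VFS can size its scan of this
 * super block's caches.
 */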
static int
zpl_nr_cached_objects(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int nr;

	mutex_enter(&zsb->z_znodes_lock);
	nr = zsb->z_nr_znodes;
	mutex_exit(&zsb->z_znodes_lock);

	return (nr);
}
#endif /* HAVE_NR_CACHED_OBJECTS */

#ifdef HAVE_FREE_CACHED_OBJECTS
/*
 * Attempt to evict some metadata from the cache.  The ARC operates in
 * terms of bytes while the Linux VFS works with objects.  Because this is
 * only a best effort eviction and the exact values aren't critical, we
 * extrapolate from an object count to a byte size using the znode_t size.
 */
static void
zpl_free_cached_objects(struct super_block *sb, int nr_to_scan)
{
	/* noop */
}
#endif /* HAVE_FREE_CACHED_OBJECTS */

const struct super_operations zpl_super_operations = {
	.alloc_inode = zpl_inode_alloc,
	.destroy_inode = zpl_inode_destroy,
	.dirty_inode = zpl_dirty_inode,
	.write_inode = NULL,
#ifdef HAVE_EVICT_INODE
	.evict_inode = zpl_evict_inode,
#else
	.drop_inode = zpl_drop_inode,
	.clear_inode = zpl_clear_inode,
	.delete_inode = zpl_inode_delete,
#endif /* HAVE_EVICT_INODE */
	.put_super = zpl_put_super,
	.sync_fs = zpl_sync_fs,
	.statfs = zpl_statfs,
	.remount_fs = zpl_remount_fs,
	.show_options = zpl_show_options,
	.show_stats = NULL,
#ifdef HAVE_NR_CACHED_OBJECTS
	.nr_cached_objects = zpl_nr_cached_objects,
#endif /* HAVE_NR_CACHED_OBJECTS */
#ifdef HAVE_FREE_CACHED_OBJECTS
	.free_cached_objects = zpl_free_cached_objects,
#endif /* HAVE_FREE_CACHED_OBJECTS */
};

struct file_system_type zpl_fs_type = {
	.owner = THIS_MODULE,
	.name = ZFS_DRIVER,
#ifdef HAVE_MOUNT_NODEV
	.mount = zpl_mount,
#else
	.get_sb = zpl_get_sb,
#endif /* HAVE_MOUNT_NODEV */
	.kill_sb = zpl_kill_sb,
};