]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - fs/xfs/xfs_super.c
xfs: convert mount option parsing to tokens
[mirror_ubuntu-eoan-kernel.git] / fs / xfs / xfs_super.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include "xfs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_sb.h"
25 #include "xfs_mount.h"
26 #include "xfs_da_format.h"
27 #include "xfs_inode.h"
28 #include "xfs_btree.h"
29 #include "xfs_bmap.h"
30 #include "xfs_alloc.h"
31 #include "xfs_error.h"
32 #include "xfs_fsops.h"
33 #include "xfs_trans.h"
34 #include "xfs_buf_item.h"
35 #include "xfs_log.h"
36 #include "xfs_log_priv.h"
37 #include "xfs_da_btree.h"
38 #include "xfs_dir2.h"
39 #include "xfs_extfree_item.h"
40 #include "xfs_mru_cache.h"
41 #include "xfs_inode_item.h"
42 #include "xfs_icache.h"
43 #include "xfs_trace.h"
44 #include "xfs_icreate_item.h"
45 #include "xfs_filestream.h"
46 #include "xfs_quota.h"
47 #include "xfs_sysfs.h"
48
49 #include <linux/namei.h>
50 #include <linux/init.h>
51 #include <linux/slab.h>
52 #include <linux/mount.h>
53 #include <linux/mempool.h>
54 #include <linux/writeback.h>
55 #include <linux/kthread.h>
56 #include <linux/freezer.h>
57 #include <linux/parser.h>
58
59 static const struct super_operations xfs_super_operations;
60 static kmem_zone_t *xfs_ioend_zone;
61 mempool_t *xfs_ioend_pool;
62
63 static struct kset *xfs_kset; /* top-level xfs sysfs dir */
64 #ifdef DEBUG
65 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
66 #endif
67
68 /*
69 * Table driven mount option parser.
70 */
71 enum {
72 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
73 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
74 Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
75 Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
76 Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
77 Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
78 Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
79 Opt_uquota, Opt_gquota, Opt_pquota,
80 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
81 Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
82 };
83
84 static const match_table_t tokens = {
85 {Opt_logbufs, "logbufs=%u"}, /* number of XFS log buffers */
86 {Opt_logbsize, "logbsize=%s"}, /* size of XFS log buffers */
87 {Opt_logdev, "logdev=%s"}, /* log device */
88 {Opt_rtdev, "rtdev=%s"}, /* realtime I/O device */
89 {Opt_biosize, "biosize=%u"}, /* log2 of preferred buffered io size */
90 {Opt_wsync, "wsync"}, /* safe-mode nfs compatible mount */
91 {Opt_noalign, "noalign"}, /* turn off stripe alignment */
92 {Opt_swalloc, "swalloc"}, /* turn on stripe width allocation */
93 {Opt_sunit, "sunit=%u"}, /* data volume stripe unit */
94 {Opt_swidth, "swidth=%u"}, /* data volume stripe width */
95 {Opt_nouuid, "nouuid"}, /* ignore filesystem UUID */
96 {Opt_mtpt, "mtpt"}, /* filesystem mount point */
97 {Opt_grpid, "grpid"}, /* group-ID from parent directory */
98 {Opt_nogrpid, "nogrpid"}, /* group-ID from current process */
99 {Opt_bsdgroups, "bsdgroups"}, /* group-ID from parent directory */
100 {Opt_sysvgroups,"sysvgroups"}, /* group-ID from current process */
101 {Opt_allocsize, "allocsize=%s"},/* preferred allocation size */
102 {Opt_norecovery,"norecovery"}, /* don't run XFS recovery */
103 {Opt_barrier, "barrier"}, /* use writer barriers for log write and
104 * unwritten extent conversion */
105 {Opt_nobarrier, "nobarrier"}, /* .. disable */
106 {Opt_inode64, "inode64"}, /* inodes can be allocated anywhere */
107 {Opt_inode32, "inode32"}, /* inode allocation limited to
108 * XFS_MAXINUMBER_32 */
109 {Opt_ikeep, "ikeep"}, /* do not free empty inode clusters */
110 {Opt_noikeep, "noikeep"}, /* free empty inode clusters */
111 {Opt_largeio, "largeio"}, /* report large I/O sizes in stat() */
112 {Opt_nolargeio, "nolargeio"}, /* do not report large I/O sizes
113 * in stat(). */
114 {Opt_attr2, "attr2"}, /* do use attr2 attribute format */
115 {Opt_noattr2, "noattr2"}, /* do not use attr2 attribute format */
116 {Opt_filestreams,"filestreams"},/* use filestreams allocator */
117 {Opt_quota, "quota"}, /* disk quotas (user) */
118 {Opt_noquota, "noquota"}, /* no quotas */
119 {Opt_usrquota, "usrquota"}, /* user quota enabled */
120 {Opt_grpquota, "grpquota"}, /* group quota enabled */
121 {Opt_prjquota, "prjquota"}, /* project quota enabled */
122 {Opt_uquota, "uquota"}, /* user quota (IRIX variant) */
123 {Opt_gquota, "gquota"}, /* group quota (IRIX variant) */
124 {Opt_pquota, "pquota"}, /* project quota (IRIX variant) */
125 {Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
126 {Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
127 {Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
128 {Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
129 {Opt_discard, "discard"}, /* Discard unused blocks */
130 {Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
131
132 {Opt_dax, "dax"}, /* Enable direct access to bdev pages */
133 {Opt_err, NULL},
134 };
135
136
137 STATIC int
138 suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
139 {
140 int last, shift_left_factor = 0, _res;
141 char *value;
142 int ret = 0;
143
144 value = match_strdup(s);
145 if (!value)
146 return -ENOMEM;
147
148 last = strlen(value) - 1;
149 if (value[last] == 'K' || value[last] == 'k') {
150 shift_left_factor = 10;
151 value[last] = '\0';
152 }
153 if (value[last] == 'M' || value[last] == 'm') {
154 shift_left_factor = 20;
155 value[last] = '\0';
156 }
157 if (value[last] == 'G' || value[last] == 'g') {
158 shift_left_factor = 30;
159 value[last] = '\0';
160 }
161
162 if (kstrtoint(value, base, &_res))
163 ret = -EINVAL;
164 kfree(value);
165 *res = _res << shift_left_factor;
166 return ret;
167 }
168
169 /*
170 * This function fills in xfs_mount_t fields based on mount args.
171 * Note: the superblock has _not_ yet been read in.
172 *
173 * Note that this function leaks the various device name allocations on
174 * failure. The caller takes care of them.
175 */
176 STATIC int
177 xfs_parseargs(
178 struct xfs_mount *mp,
179 char *options)
180 {
181 struct super_block *sb = mp->m_super;
182 char *p;
183 substring_t args[MAX_OPT_ARGS];
184 int dsunit = 0;
185 int dswidth = 0;
186 int iosize = 0;
187 __uint8_t iosizelog = 0;
188
189 /*
190 * set up the mount name first so all the errors will refer to the
191 * correct device.
192 */
193 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
194 if (!mp->m_fsname)
195 return -ENOMEM;
196 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
197
198 /*
199 * Copy binary VFS mount flags we are interested in.
200 */
201 if (sb->s_flags & MS_RDONLY)
202 mp->m_flags |= XFS_MOUNT_RDONLY;
203 if (sb->s_flags & MS_DIRSYNC)
204 mp->m_flags |= XFS_MOUNT_DIRSYNC;
205 if (sb->s_flags & MS_SYNCHRONOUS)
206 mp->m_flags |= XFS_MOUNT_WSYNC;
207
208 /*
209 * Set some default flags that could be cleared by the mount option
210 * parsing.
211 */
212 mp->m_flags |= XFS_MOUNT_BARRIER;
213 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
214
215 /*
216 * These can be overridden by the mount option parsing.
217 */
218 mp->m_logbufs = -1;
219 mp->m_logbsize = -1;
220
221 if (!options)
222 goto done;
223
224 while ((p = strsep(&options, ",")) != NULL) {
225 int token;
226
227 if (!*p)
228 continue;
229
230 token = match_token(p, tokens, args);
231 switch (token) {
232 case Opt_logbufs:
233 if (match_int(args, &mp->m_logbufs))
234 return -EINVAL;
235 break;
236 case Opt_logbsize:
237 if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
238 return -EINVAL;
239 break;
240 case Opt_logdev:
241 mp->m_logname = match_strdup(args);
242 if (!mp->m_logname)
243 return -ENOMEM;
244 break;
245 case Opt_mtpt:
246 xfs_warn(mp, "%s option not allowed on this system", p);
247 return -EINVAL;
248 case Opt_rtdev:
249 mp->m_rtname = match_strdup(args);
250 if (!mp->m_rtname)
251 return -ENOMEM;
252 break;
253 case Opt_allocsize:
254 case Opt_biosize:
255 if (suffix_kstrtoint(args, 10, &iosize))
256 return -EINVAL;
257 iosizelog = ffs(iosize) - 1;
258 break;
259 case Opt_grpid:
260 case Opt_bsdgroups:
261 mp->m_flags |= XFS_MOUNT_GRPID;
262 break;
263 case Opt_nogrpid:
264 case Opt_sysvgroups:
265 mp->m_flags &= ~XFS_MOUNT_GRPID;
266 break;
267 case Opt_wsync:
268 mp->m_flags |= XFS_MOUNT_WSYNC;
269 break;
270 case Opt_norecovery:
271 mp->m_flags |= XFS_MOUNT_NORECOVERY;
272 break;
273 case Opt_noalign:
274 mp->m_flags |= XFS_MOUNT_NOALIGN;
275 break;
276 case Opt_swalloc:
277 mp->m_flags |= XFS_MOUNT_SWALLOC;
278 break;
279 case Opt_sunit:
280 if (match_int(args, &dsunit))
281 return -EINVAL;
282 break;
283 case Opt_swidth:
284 if (match_int(args, &dswidth))
285 return -EINVAL;
286 break;
287 case Opt_inode32:
288 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
289 break;
290 case Opt_inode64:
291 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
292 break;
293 case Opt_nouuid:
294 mp->m_flags |= XFS_MOUNT_NOUUID;
295 break;
296 case Opt_barrier:
297 mp->m_flags |= XFS_MOUNT_BARRIER;
298 break;
299 case Opt_nobarrier:
300 mp->m_flags &= ~XFS_MOUNT_BARRIER;
301 break;
302 case Opt_ikeep:
303 mp->m_flags |= XFS_MOUNT_IKEEP;
304 break;
305 case Opt_noikeep:
306 mp->m_flags &= ~XFS_MOUNT_IKEEP;
307 break;
308 case Opt_largeio:
309 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
310 break;
311 case Opt_nolargeio:
312 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
313 break;
314 case Opt_attr2:
315 mp->m_flags |= XFS_MOUNT_ATTR2;
316 break;
317 case Opt_noattr2:
318 mp->m_flags &= ~XFS_MOUNT_ATTR2;
319 mp->m_flags |= XFS_MOUNT_NOATTR2;
320 break;
321 case Opt_filestreams:
322 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
323 break;
324 case Opt_noquota:
325 mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
326 mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
327 mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
328 break;
329 case Opt_quota:
330 case Opt_uquota:
331 case Opt_usrquota:
332 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
333 XFS_UQUOTA_ENFD);
334 break;
335 case Opt_qnoenforce:
336 case Opt_uqnoenforce:
337 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
338 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
339 break;
340 case Opt_pquota:
341 case Opt_prjquota:
342 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
343 XFS_PQUOTA_ENFD);
344 break;
345 case Opt_pqnoenforce:
346 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
347 mp->m_qflags &= ~XFS_PQUOTA_ENFD;
348 case Opt_gquota:
349 case Opt_grpquota:
350 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
351 XFS_GQUOTA_ENFD);
352 break;
353 case Opt_gqnoenforce:
354 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
355 mp->m_qflags &= ~XFS_GQUOTA_ENFD;
356 break;
357 case Opt_discard:
358 mp->m_flags |= XFS_MOUNT_DISCARD;
359 break;
360 case Opt_nodiscard:
361 mp->m_flags &= ~XFS_MOUNT_DISCARD;
362 break;
363 #ifdef CONFIG_FS_DAX
364 case Opt_dax:
365 mp->m_flags |= XFS_MOUNT_DAX;
366 break;
367 #endif
368 default:
369 xfs_warn(mp, "unknown mount option [%s].", p);
370 return -EINVAL;
371 }
372 }
373
374 /*
375 * no recovery flag requires a read-only mount
376 */
377 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
378 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
379 xfs_warn(mp, "no-recovery mounts must be read-only.");
380 return -EINVAL;
381 }
382
383 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
384 xfs_warn(mp,
385 "sunit and swidth options incompatible with the noalign option");
386 return -EINVAL;
387 }
388
389 #ifndef CONFIG_XFS_QUOTA
390 if (XFS_IS_QUOTA_RUNNING(mp)) {
391 xfs_warn(mp, "quota support not available in this kernel.");
392 return -EINVAL;
393 }
394 #endif
395
396 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
397 xfs_warn(mp, "sunit and swidth must be specified together");
398 return -EINVAL;
399 }
400
401 if (dsunit && (dswidth % dsunit != 0)) {
402 xfs_warn(mp,
403 "stripe width (%d) must be a multiple of the stripe unit (%d)",
404 dswidth, dsunit);
405 return -EINVAL;
406 }
407
408 done:
409 if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
410 /*
411 * At this point the superblock has not been read
412 * in, therefore we do not know the block size.
413 * Before the mount call ends we will convert
414 * these to FSBs.
415 */
416 mp->m_dalign = dsunit;
417 mp->m_swidth = dswidth;
418 }
419
420 if (mp->m_logbufs != -1 &&
421 mp->m_logbufs != 0 &&
422 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
423 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
424 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
425 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
426 return -EINVAL;
427 }
428 if (mp->m_logbsize != -1 &&
429 mp->m_logbsize != 0 &&
430 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
431 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
432 !is_power_of_2(mp->m_logbsize))) {
433 xfs_warn(mp,
434 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
435 mp->m_logbsize);
436 return -EINVAL;
437 }
438
439 if (iosizelog) {
440 if (iosizelog > XFS_MAX_IO_LOG ||
441 iosizelog < XFS_MIN_IO_LOG) {
442 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
443 iosizelog, XFS_MIN_IO_LOG,
444 XFS_MAX_IO_LOG);
445 return -EINVAL;
446 }
447
448 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
449 mp->m_readio_log = iosizelog;
450 mp->m_writeio_log = iosizelog;
451 }
452
453 return 0;
454 }
455
456 struct proc_xfs_info {
457 uint64_t flag;
458 char *str;
459 };
460
461 STATIC int
462 xfs_showargs(
463 struct xfs_mount *mp,
464 struct seq_file *m)
465 {
466 static struct proc_xfs_info xfs_info_set[] = {
467 /* the few simple ones we can get from the mount struct */
468 { XFS_MOUNT_IKEEP, ",ikeep" },
469 { XFS_MOUNT_WSYNC, ",wsync" },
470 { XFS_MOUNT_NOALIGN, ",noalign" },
471 { XFS_MOUNT_SWALLOC, ",swalloc" },
472 { XFS_MOUNT_NOUUID, ",nouuid" },
473 { XFS_MOUNT_NORECOVERY, ",norecovery" },
474 { XFS_MOUNT_ATTR2, ",attr2" },
475 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
476 { XFS_MOUNT_GRPID, ",grpid" },
477 { XFS_MOUNT_DISCARD, ",discard" },
478 { XFS_MOUNT_SMALL_INUMS, ",inode32" },
479 { XFS_MOUNT_DAX, ",dax" },
480 { 0, NULL }
481 };
482 static struct proc_xfs_info xfs_info_unset[] = {
483 /* the few simple ones we can get from the mount struct */
484 { XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
485 { XFS_MOUNT_BARRIER, ",nobarrier" },
486 { XFS_MOUNT_SMALL_INUMS, ",inode64" },
487 { 0, NULL }
488 };
489 struct proc_xfs_info *xfs_infop;
490
491 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
492 if (mp->m_flags & xfs_infop->flag)
493 seq_puts(m, xfs_infop->str);
494 }
495 for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
496 if (!(mp->m_flags & xfs_infop->flag))
497 seq_puts(m, xfs_infop->str);
498 }
499
500 if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
501 seq_printf(m, ",allocsize=%dk",
502 (int)(1 << mp->m_writeio_log) >> 10);
503
504 if (mp->m_logbufs > 0)
505 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
506 if (mp->m_logbsize > 0)
507 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
508
509 if (mp->m_logname)
510 seq_show_option(m, "logdev", mp->m_logname);
511 if (mp->m_rtname)
512 seq_show_option(m, "rtdev", mp->m_rtname);
513
514 if (mp->m_dalign > 0)
515 seq_printf(m, ",sunit=%d",
516 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
517 if (mp->m_swidth > 0)
518 seq_printf(m, ",swidth=%d",
519 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
520
521 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
522 seq_puts(m, ",usrquota");
523 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
524 seq_puts(m, ",uqnoenforce");
525
526 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
527 if (mp->m_qflags & XFS_PQUOTA_ENFD)
528 seq_puts(m, ",prjquota");
529 else
530 seq_puts(m, ",pqnoenforce");
531 }
532 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
533 if (mp->m_qflags & XFS_GQUOTA_ENFD)
534 seq_puts(m, ",grpquota");
535 else
536 seq_puts(m, ",gqnoenforce");
537 }
538
539 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
540 seq_puts(m, ",noquota");
541
542 return 0;
543 }
544 __uint64_t
545 xfs_max_file_offset(
546 unsigned int blockshift)
547 {
548 unsigned int pagefactor = 1;
549 unsigned int bitshift = BITS_PER_LONG - 1;
550
551 /* Figure out maximum filesize, on Linux this can depend on
552 * the filesystem blocksize (on 32 bit platforms).
553 * __block_write_begin does this in an [unsigned] long...
554 * page->index << (PAGE_CACHE_SHIFT - bbits)
555 * So, for page sized blocks (4K on 32 bit platforms),
556 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
557 * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
558 * but for smaller blocksizes it is less (bbits = log2 bsize).
559 * Note1: get_block_t takes a long (implicit cast from above)
560 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
561 * can optionally convert the [unsigned] long from above into
562 * an [unsigned] long long.
563 */
564
565 #if BITS_PER_LONG == 32
566 # if defined(CONFIG_LBDAF)
567 ASSERT(sizeof(sector_t) == 8);
568 pagefactor = PAGE_CACHE_SIZE;
569 bitshift = BITS_PER_LONG;
570 # else
571 pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
572 # endif
573 #endif
574
575 return (((__uint64_t)pagefactor) << bitshift) - 1;
576 }
577
578 /*
579 * xfs_set_inode32() and xfs_set_inode64() are passed an agcount
580 * because in the growfs case, mp->m_sb.sb_agcount is not updated
581 * yet to the potentially higher ag count.
582 */
583 xfs_agnumber_t
584 xfs_set_inode32(struct xfs_mount *mp, xfs_agnumber_t agcount)
585 {
586 xfs_agnumber_t index = 0;
587 xfs_agnumber_t maxagi = 0;
588 xfs_sb_t *sbp = &mp->m_sb;
589 xfs_agnumber_t max_metadata;
590 xfs_agino_t agino;
591 xfs_ino_t ino;
592 xfs_perag_t *pag;
593
594 /* Calculate how much should be reserved for inodes to meet
595 * the max inode percentage.
596 */
597 if (mp->m_maxicount) {
598 __uint64_t icount;
599
600 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
601 do_div(icount, 100);
602 icount += sbp->sb_agblocks - 1;
603 do_div(icount, sbp->sb_agblocks);
604 max_metadata = icount;
605 } else {
606 max_metadata = agcount;
607 }
608
609 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
610
611 for (index = 0; index < agcount; index++) {
612 ino = XFS_AGINO_TO_INO(mp, index, agino);
613
614 if (ino > XFS_MAXINUMBER_32) {
615 pag = xfs_perag_get(mp, index);
616 pag->pagi_inodeok = 0;
617 pag->pagf_metadata = 0;
618 xfs_perag_put(pag);
619 continue;
620 }
621
622 pag = xfs_perag_get(mp, index);
623 pag->pagi_inodeok = 1;
624 maxagi++;
625 if (index < max_metadata)
626 pag->pagf_metadata = 1;
627 xfs_perag_put(pag);
628 }
629 mp->m_flags |= (XFS_MOUNT_32BITINODES |
630 XFS_MOUNT_SMALL_INUMS);
631
632 return maxagi;
633 }
634
635 xfs_agnumber_t
636 xfs_set_inode64(struct xfs_mount *mp, xfs_agnumber_t agcount)
637 {
638 xfs_agnumber_t index = 0;
639
640 for (index = 0; index < agcount; index++) {
641 struct xfs_perag *pag;
642
643 pag = xfs_perag_get(mp, index);
644 pag->pagi_inodeok = 1;
645 pag->pagf_metadata = 0;
646 xfs_perag_put(pag);
647 }
648
649 /* There is no need for lock protection on m_flags,
650 * the rw_semaphore of the VFS superblock is locked
651 * during mount/umount/remount operations, so this is
652 * enough to avoid concurency on the m_flags field
653 */
654 mp->m_flags &= ~(XFS_MOUNT_32BITINODES |
655 XFS_MOUNT_SMALL_INUMS);
656 return index;
657 }
658
659 STATIC int
660 xfs_blkdev_get(
661 xfs_mount_t *mp,
662 const char *name,
663 struct block_device **bdevp)
664 {
665 int error = 0;
666
667 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
668 mp);
669 if (IS_ERR(*bdevp)) {
670 error = PTR_ERR(*bdevp);
671 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
672 }
673
674 return error;
675 }
676
677 STATIC void
678 xfs_blkdev_put(
679 struct block_device *bdev)
680 {
681 if (bdev)
682 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
683 }
684
685 void
686 xfs_blkdev_issue_flush(
687 xfs_buftarg_t *buftarg)
688 {
689 blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
690 }
691
692 STATIC void
693 xfs_close_devices(
694 struct xfs_mount *mp)
695 {
696 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
697 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
698 xfs_free_buftarg(mp, mp->m_logdev_targp);
699 xfs_blkdev_put(logdev);
700 }
701 if (mp->m_rtdev_targp) {
702 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
703 xfs_free_buftarg(mp, mp->m_rtdev_targp);
704 xfs_blkdev_put(rtdev);
705 }
706 xfs_free_buftarg(mp, mp->m_ddev_targp);
707 }
708
709 /*
710 * The file system configurations are:
711 * (1) device (partition) with data and internal log
712 * (2) logical volume with data and log subvolumes.
713 * (3) logical volume with data, log, and realtime subvolumes.
714 *
715 * We only have to handle opening the log and realtime volumes here if
716 * they are present. The data subvolume has already been opened by
717 * get_sb_bdev() and is stored in sb->s_bdev.
718 */
719 STATIC int
720 xfs_open_devices(
721 struct xfs_mount *mp)
722 {
723 struct block_device *ddev = mp->m_super->s_bdev;
724 struct block_device *logdev = NULL, *rtdev = NULL;
725 int error;
726
727 /*
728 * Open real time and log devices - order is important.
729 */
730 if (mp->m_logname) {
731 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
732 if (error)
733 goto out;
734 }
735
736 if (mp->m_rtname) {
737 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
738 if (error)
739 goto out_close_logdev;
740
741 if (rtdev == ddev || rtdev == logdev) {
742 xfs_warn(mp,
743 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
744 error = -EINVAL;
745 goto out_close_rtdev;
746 }
747 }
748
749 /*
750 * Setup xfs_mount buffer target pointers
751 */
752 error = -ENOMEM;
753 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
754 if (!mp->m_ddev_targp)
755 goto out_close_rtdev;
756
757 if (rtdev) {
758 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
759 if (!mp->m_rtdev_targp)
760 goto out_free_ddev_targ;
761 }
762
763 if (logdev && logdev != ddev) {
764 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
765 if (!mp->m_logdev_targp)
766 goto out_free_rtdev_targ;
767 } else {
768 mp->m_logdev_targp = mp->m_ddev_targp;
769 }
770
771 return 0;
772
773 out_free_rtdev_targ:
774 if (mp->m_rtdev_targp)
775 xfs_free_buftarg(mp, mp->m_rtdev_targp);
776 out_free_ddev_targ:
777 xfs_free_buftarg(mp, mp->m_ddev_targp);
778 out_close_rtdev:
779 xfs_blkdev_put(rtdev);
780 out_close_logdev:
781 if (logdev && logdev != ddev)
782 xfs_blkdev_put(logdev);
783 out:
784 return error;
785 }
786
787 /*
788 * Setup xfs_mount buffer target pointers based on superblock
789 */
790 STATIC int
791 xfs_setup_devices(
792 struct xfs_mount *mp)
793 {
794 int error;
795
796 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
797 if (error)
798 return error;
799
800 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
801 unsigned int log_sector_size = BBSIZE;
802
803 if (xfs_sb_version_hassector(&mp->m_sb))
804 log_sector_size = mp->m_sb.sb_logsectsize;
805 error = xfs_setsize_buftarg(mp->m_logdev_targp,
806 log_sector_size);
807 if (error)
808 return error;
809 }
810 if (mp->m_rtdev_targp) {
811 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
812 mp->m_sb.sb_sectsize);
813 if (error)
814 return error;
815 }
816
817 return 0;
818 }
819
820 STATIC int
821 xfs_init_mount_workqueues(
822 struct xfs_mount *mp)
823 {
824 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
825 WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
826 if (!mp->m_buf_workqueue)
827 goto out;
828
829 mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
830 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
831 if (!mp->m_data_workqueue)
832 goto out_destroy_buf;
833
834 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
835 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
836 if (!mp->m_unwritten_workqueue)
837 goto out_destroy_data_iodone_queue;
838
839 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
840 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
841 if (!mp->m_cil_workqueue)
842 goto out_destroy_unwritten;
843
844 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
845 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
846 if (!mp->m_reclaim_workqueue)
847 goto out_destroy_cil;
848
849 mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
850 WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
851 mp->m_fsname);
852 if (!mp->m_log_workqueue)
853 goto out_destroy_reclaim;
854
855 mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
856 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
857 if (!mp->m_eofblocks_workqueue)
858 goto out_destroy_log;
859
860 return 0;
861
862 out_destroy_log:
863 destroy_workqueue(mp->m_log_workqueue);
864 out_destroy_reclaim:
865 destroy_workqueue(mp->m_reclaim_workqueue);
866 out_destroy_cil:
867 destroy_workqueue(mp->m_cil_workqueue);
868 out_destroy_unwritten:
869 destroy_workqueue(mp->m_unwritten_workqueue);
870 out_destroy_data_iodone_queue:
871 destroy_workqueue(mp->m_data_workqueue);
872 out_destroy_buf:
873 destroy_workqueue(mp->m_buf_workqueue);
874 out:
875 return -ENOMEM;
876 }
877
878 STATIC void
879 xfs_destroy_mount_workqueues(
880 struct xfs_mount *mp)
881 {
882 destroy_workqueue(mp->m_eofblocks_workqueue);
883 destroy_workqueue(mp->m_log_workqueue);
884 destroy_workqueue(mp->m_reclaim_workqueue);
885 destroy_workqueue(mp->m_cil_workqueue);
886 destroy_workqueue(mp->m_data_workqueue);
887 destroy_workqueue(mp->m_unwritten_workqueue);
888 destroy_workqueue(mp->m_buf_workqueue);
889 }
890
891 /*
892 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
893 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
894 * for IO to complete so that we effectively throttle multiple callers to the
895 * rate at which IO is completing.
896 */
897 void
898 xfs_flush_inodes(
899 struct xfs_mount *mp)
900 {
901 struct super_block *sb = mp->m_super;
902
903 if (down_read_trylock(&sb->s_umount)) {
904 sync_inodes_sb(sb);
905 up_read(&sb->s_umount);
906 }
907 }
908
909 /* Catch misguided souls that try to use this interface on XFS */
910 STATIC struct inode *
911 xfs_fs_alloc_inode(
912 struct super_block *sb)
913 {
914 BUG();
915 return NULL;
916 }
917
918 /*
919 * Now that the generic code is guaranteed not to be accessing
920 * the linux inode, we can reclaim the inode.
921 */
922 STATIC void
923 xfs_fs_destroy_inode(
924 struct inode *inode)
925 {
926 struct xfs_inode *ip = XFS_I(inode);
927
928 trace_xfs_destroy_inode(ip);
929
930 XFS_STATS_INC(ip->i_mount, vn_reclaim);
931
932 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
933
934 /*
935 * We should never get here with one of the reclaim flags already set.
936 */
937 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
938 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
939
940 /*
941 * We always use background reclaim here because even if the
942 * inode is clean, it still may be under IO and hence we have
943 * to take the flush lock. The background reclaim path handles
944 * this more efficiently than we can here, so simply let background
945 * reclaim tear down all inodes.
946 */
947 xfs_inode_set_reclaim_tag(ip);
948 }
949
950 /*
951 * Slab object creation initialisation for the XFS inode.
952 * This covers only the idempotent fields in the XFS inode;
953 * all other fields need to be initialised on allocation
954 * from the slab. This avoids the need to repeatedly initialise
955 * fields in the xfs inode that left in the initialise state
956 * when freeing the inode.
957 */
958 STATIC void
959 xfs_fs_inode_init_once(
960 void *inode)
961 {
962 struct xfs_inode *ip = inode;
963
964 memset(ip, 0, sizeof(struct xfs_inode));
965
966 /* vfs inode */
967 inode_init_once(VFS_I(ip));
968
969 /* xfs inode */
970 atomic_set(&ip->i_pincount, 0);
971 spin_lock_init(&ip->i_flags_lock);
972
973 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
974 "xfsino", ip->i_ino);
975 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
976 "xfsino", ip->i_ino);
977 }
978
979 STATIC void
980 xfs_fs_evict_inode(
981 struct inode *inode)
982 {
983 xfs_inode_t *ip = XFS_I(inode);
984
985 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
986
987 trace_xfs_evict_inode(ip);
988
989 truncate_inode_pages_final(&inode->i_data);
990 clear_inode(inode);
991 XFS_STATS_INC(ip->i_mount, vn_rele);
992 XFS_STATS_INC(ip->i_mount, vn_remove);
993
994 xfs_inactive(ip);
995 }
996
997 /*
998 * We do an unlocked check for XFS_IDONTCACHE here because we are already
999 * serialised against cache hits here via the inode->i_lock and igrab() in
1000 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
1001 * racing with us, and it avoids needing to grab a spinlock here for every inode
1002 * we drop the final reference on.
1003 */
1004 STATIC int
1005 xfs_fs_drop_inode(
1006 struct inode *inode)
1007 {
1008 struct xfs_inode *ip = XFS_I(inode);
1009
1010 return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1011 }
1012
1013 STATIC void
1014 xfs_free_fsname(
1015 struct xfs_mount *mp)
1016 {
1017 kfree(mp->m_fsname);
1018 kfree(mp->m_rtname);
1019 kfree(mp->m_logname);
1020 }
1021
1022 STATIC int
1023 xfs_fs_sync_fs(
1024 struct super_block *sb,
1025 int wait)
1026 {
1027 struct xfs_mount *mp = XFS_M(sb);
1028
1029 /*
1030 * Doing anything during the async pass would be counterproductive.
1031 */
1032 if (!wait)
1033 return 0;
1034
1035 xfs_log_force(mp, XFS_LOG_SYNC);
1036 if (laptop_mode) {
1037 /*
1038 * The disk must be active because we're syncing.
1039 * We schedule log work now (now that the disk is
1040 * active) instead of later (when it might not be).
1041 */
1042 flush_delayed_work(&mp->m_log->l_work);
1043 }
1044
1045 return 0;
1046 }
1047
/*
 * Fill in @statp for statfs(2) on the filesystem containing @dentry.
 *
 * Inode and free-block counts are summed from the per-cpu counters
 * outside m_sb_lock; only static superblock geometry is read under the
 * lock.  Always returns 0.
 */
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	__uint64_t		fakeinos, id;
	__uint64_t		icount;
	__uint64_t		ifree;
	__uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	/* derive a stable fsid from the data device number */
	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	/* an internal log consumes data blocks; exclude it from f_blocks */
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	statp->f_bavail = statp->f_bfree;

	/*
	 * XFS allocates inodes on demand, so report the number that could
	 * still be created from free space, capped by the configured
	 * maximum inode count if there is one.
	 */
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	/* enforced project quota limits may further restrict this dentry */
	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
1106
1107 STATIC void
1108 xfs_save_resvblks(struct xfs_mount *mp)
1109 {
1110 __uint64_t resblks = 0;
1111
1112 mp->m_resblks_save = mp->m_resblks;
1113 xfs_reserve_blocks(mp, &resblks, NULL);
1114 }
1115
1116 STATIC void
1117 xfs_restore_resvblks(struct xfs_mount *mp)
1118 {
1119 __uint64_t resblks;
1120
1121 if (mp->m_resblks_save) {
1122 resblks = mp->m_resblks_save;
1123 mp->m_resblks_save = 0;
1124 } else
1125 resblks = xfs_default_resblks(mp);
1126
1127 xfs_reserve_blocks(mp, &resblks, NULL);
1128 }
1129
1130 /*
1131 * Trigger writeback of all the dirty metadata in the file system.
1132 *
1133 * This ensures that the metadata is written to their location on disk rather
1134 * than just existing in transactions in the log. This means after a quiesce
1135 * there is no log replay required to write the inodes to disk - this is the
1136 * primary difference between a sync and a quiesce.
1137 *
1138 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1139 * it is started again when appropriate.
1140 */
1141 static void
1142 xfs_quiesce_attr(
1143 struct xfs_mount *mp)
1144 {
1145 int error = 0;
1146
1147 /* wait for all modifications to complete */
1148 while (atomic_read(&mp->m_active_trans) > 0)
1149 delay(100);
1150
1151 /* force the log to unpin objects from the now complete transactions */
1152 xfs_log_force(mp, XFS_LOG_SYNC);
1153
1154 /* reclaim inodes to do any IO before the freeze completes */
1155 xfs_reclaim_inodes(mp, 0);
1156 xfs_reclaim_inodes(mp, SYNC_WAIT);
1157
1158 /* Push the superblock and write an unmount record */
1159 error = xfs_log_sbcount(mp);
1160 if (error)
1161 xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
1162 "Frozen image may not be consistent.");
1163 /*
1164 * Just warn here till VFS can correctly support
1165 * read-only remount without racing.
1166 */
1167 WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1168
1169 xfs_log_quiesce(mp);
1170 }
1171
/*
 * Handle mount -o remount.  The option string is reparsed with the same
 * token table used at mount time, but only barrier and inode32/inode64
 * can actually be changed; everything else is silently ignored (see the
 * comment in the default case).  Afterwards, perform any ro<->rw state
 * transition requested via *flags.  Returns 0 or a negative errno.
 */
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	sync_filesystem(sb);
	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		case Opt_inode64:
			mp->m_maxagi = xfs_set_inode64(mp, sbp->sb_agcount);
			break;
		case Opt_inode32:
			mp->m_maxagi = xfs_set_inode32(mp, sbp->sb_agcount);
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		/* norecovery mounts never replayed the log; can't go rw */
		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
			xfs_warn(mp,
		"ro->rw transition prohibited on norecovery mount");
			return -EINVAL;
		}

		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_sb) {
			error = xfs_sync_sb(mp, false);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_sb = false;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
		xfs_log_work_queue(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * Before we sync the metadata, we need to free up the reserve
		 * block pool so that the used block count in the superblock on
		 * disk is correct at the end of the remount. Stash the current
		 * reserve pool size so that if we get remounted rw, we can
		 * return it to the same size.
		 */
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}
1279
1280 /*
1281 * Second stage of a freeze. The data is already frozen so we only
1282 * need to take care of the metadata. Once that's done sync the superblock
1283 * to the log to dirty it in case of a crash while frozen. This ensures that we
1284 * will recover the unlinked inode lists on the next mount.
1285 */
1286 STATIC int
1287 xfs_fs_freeze(
1288 struct super_block *sb)
1289 {
1290 struct xfs_mount *mp = XFS_M(sb);
1291
1292 xfs_save_resvblks(mp);
1293 xfs_quiesce_attr(mp);
1294 return xfs_sync_sb(mp, true);
1295 }
1296
/*
 * Undo xfs_fs_freeze(): refill the reserve block pool and restart the
 * background log work that the freeze-time quiesce stopped.
 */
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	return 0;
}
1307
1308 STATIC int
1309 xfs_fs_show_options(
1310 struct seq_file *m,
1311 struct dentry *root)
1312 {
1313 return xfs_showargs(XFS_M(root->d_sb), m);
1314 }
1315
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 *
 * Validates the combination of mount options against on-disk features
 * (log stripe vs logbuf size, attr2 on V5, ro media, quota support) and
 * returns 0 on success or a negative errno to fail the mount.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		/* no explicit logbsize: default to the log stripe unit */
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			"attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	/* combined group+project quota needs the pquotino superblock field */
	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
1383
1384 static int
1385 xfs_init_percpu_counters(
1386 struct xfs_mount *mp)
1387 {
1388 int error;
1389
1390 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1391 if (error)
1392 return -ENOMEM;
1393
1394 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1395 if (error)
1396 goto free_icount;
1397
1398 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1399 if (error)
1400 goto free_ifree;
1401
1402 return 0;
1403
1404 free_ifree:
1405 percpu_counter_destroy(&mp->m_ifree);
1406 free_icount:
1407 percpu_counter_destroy(&mp->m_icount);
1408 return -ENOMEM;
1409 }
1410
/*
 * Resynchronise the per-cpu counters with the values currently held in
 * the in-core superblock (e.g. after the superblock has been re-read).
 */
void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}
1419
/*
 * Tear down the per-cpu counters created by xfs_init_percpu_counters().
 */
static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
}
1428
/*
 * fill_super callback used by xfs_fs_mount()/mount_bdev(): allocate and
 * initialise the xfs_mount, parse options, open the devices, read the
 * superblock, run the full mount and hang the root inode off the VFS
 * super_block.  On failure everything set up so far is unwound through
 * the goto ladder at the bottom and a negative errno is returned.
 */
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = -ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;

	mp->m_super = sb;
	sb->s_fs_info = mp;

	/* parse the option string handed in by mount(2) */
	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	/* validate mount options against the superblock just read */
	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= MS_I_VERSION;

	/* DAX requires page-sized blocks and direct_access support */
	if (mp->m_flags & XFS_MOUNT_DAX) {
		xfs_warn(mp,
	"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		if (sb->s_blocksize != PAGE_SIZE) {
			xfs_alert(mp,
		"Filesystem block size invalid for DAX Turning DAX off.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		} else if (!sb->s_bdev->bd_disk->fops->direct_access) {
			xfs_alert(mp,
		"Block device does not support DAX Turning DAX off.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
	}

	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
		xfs_alert(mp,
	"EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

	/* error unwinding: each label undoes the step above it */
 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
1578
/*
 * Unmount: tear the filesystem down in the reverse order of
 * xfs_fs_fill_super() and free the xfs_mount.
 */
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}
1597
/*
 * .mount entry point for the "xfs" file_system_type: defer to the
 * generic block-device mount helper with our fill_super callback.
 */
STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
1607
/*
 * Superblock shrinker: report how many reclaimable cached inodes this
 * filesystem holds.
 */
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}
1615
/*
 * Superblock shrinker: reclaim up to sc->nr_to_scan cached inodes,
 * returning the number actually freed.
 */
static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
1623
/* Superblock operations installed on sb->s_op in xfs_fs_fill_super(). */
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
1639
/* The "xfs" filesystem type registered with the VFS in init_xfs_fs(). */
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");
1648
/*
 * Create all the slab caches (and the ioend mempool) used by XFS.  On
 * failure the caches created so far are destroyed in reverse order via
 * the goto ladder and -ENOMEM is returned.
 */
STATIC int __init
xfs_init_zones(void)
{

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	/* mempool guarantees forward progress for writeback ioends */
	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
					   "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;
	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
					"xfs_icr");
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	return 0;

	/* unwind in reverse creation order */
 out_destroy_ili_zone:
	kmem_zone_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}
1768
/*
 * Destroy every slab cache created by xfs_init_zones(), in reverse
 * creation order.
 */
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_zone_destroy(xfs_icreate_zone);
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);

}
1794
1795 STATIC int __init
1796 xfs_init_workqueues(void)
1797 {
1798 /*
1799 * The allocation workqueue can be used in memory reclaim situations
1800 * (writepage path), and parallelism is only limited by the number of
1801 * AGs in all the filesystems mounted. Hence use the default large
1802 * max_active value for this workqueue.
1803 */
1804 xfs_alloc_wq = alloc_workqueue("xfsalloc",
1805 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
1806 if (!xfs_alloc_wq)
1807 return -ENOMEM;
1808
1809 return 0;
1810 }
1811
/* Tear down the workqueues created by xfs_init_workqueues(). */
STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_alloc_wq);
}
1817
/*
 * Module init: set up all global XFS state (slab caches, workqueues,
 * MRU cache, buffer cache, procfs/sysctl/sysfs interfaces, quota) and
 * finally register the filesystem type.  Failures unwind everything
 * already initialised via the goto ladder and return a negative errno.
 */
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	/* global (cross-mount) statistics counters */
	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

	/* unwind in reverse initialisation order */
 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
1914
/*
 * Module exit: undo init_xfs_fs() in reverse order, then release the
 * filesystem UUID table.
 */
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}
1934
/* Module entry/exit points and modinfo. */
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");