1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38
39 #include <linux/magic.h>
40 #include <linux/fs_context.h>
41 #include <linux/fs_parser.h>
42
43 static const struct super_operations xfs_super_operations;
44
45 static struct kset *xfs_kset; /* top-level xfs sysfs dir */
46 #ifdef DEBUG
47 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
48 #endif
49
50 enum xfs_dax_mode {
51 XFS_DAX_INODE = 0,
52 XFS_DAX_ALWAYS = 1,
53 XFS_DAX_NEVER = 2,
54 };
55
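/*
 * Apply the requested DAX policy to the mount flags. "always" and "never"
 * are mutually exclusive, so setting one clears the other; "inode" clears
 * both and leaves the DAX decision to the per-inode flag.
 */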
56 static void
57 xfs_mount_set_dax_mode(
58 struct xfs_mount *mp,
59 enum xfs_dax_mode mode)
60 {
61 switch (mode) {
62 case XFS_DAX_INODE:
63 mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
64 break;
65 case XFS_DAX_ALWAYS:
66 mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
67 mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
68 break;
69 case XFS_DAX_NEVER:
70 mp->m_flags |= XFS_MOUNT_DAX_NEVER;
71 mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
72 break;
73 }
74 }
75
76 static const struct constant_table dax_param_enums[] = {
77 {"inode", XFS_DAX_INODE },
78 {"always", XFS_DAX_ALWAYS },
79 {"never", XFS_DAX_NEVER },
80 {}
81 };
82
83 /*
84 * Table driven mount option parser.
85 */
86 enum {
87 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
88 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
89 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
90 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
91 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
92 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
93 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
94 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
95 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
96 };
97
98 static const struct fs_parameter_spec xfs_fs_parameters[] = {
99 fsparam_u32("logbufs", Opt_logbufs),
100 fsparam_string("logbsize", Opt_logbsize),
101 fsparam_string("logdev", Opt_logdev),
102 fsparam_string("rtdev", Opt_rtdev),
103 fsparam_flag("wsync", Opt_wsync),
104 fsparam_flag("noalign", Opt_noalign),
105 fsparam_flag("swalloc", Opt_swalloc),
106 fsparam_u32("sunit", Opt_sunit),
107 fsparam_u32("swidth", Opt_swidth),
108 fsparam_flag("nouuid", Opt_nouuid),
109 fsparam_flag("grpid", Opt_grpid),
110 fsparam_flag("nogrpid", Opt_nogrpid),
111 fsparam_flag("bsdgroups", Opt_bsdgroups),
112 fsparam_flag("sysvgroups", Opt_sysvgroups),
113 fsparam_string("allocsize", Opt_allocsize),
114 fsparam_flag("norecovery", Opt_norecovery),
115 fsparam_flag("inode64", Opt_inode64),
116 fsparam_flag("inode32", Opt_inode32),
117 fsparam_flag("ikeep", Opt_ikeep),
118 fsparam_flag("noikeep", Opt_noikeep),
119 fsparam_flag("largeio", Opt_largeio),
120 fsparam_flag("nolargeio", Opt_nolargeio),
121 fsparam_flag("attr2", Opt_attr2),
122 fsparam_flag("noattr2", Opt_noattr2),
123 fsparam_flag("filestreams", Opt_filestreams),
124 fsparam_flag("quota", Opt_quota),
125 fsparam_flag("noquota", Opt_noquota),
126 fsparam_flag("usrquota", Opt_usrquota),
127 fsparam_flag("grpquota", Opt_grpquota),
128 fsparam_flag("prjquota", Opt_prjquota),
129 fsparam_flag("uquota", Opt_uquota),
130 fsparam_flag("gquota", Opt_gquota),
131 fsparam_flag("pquota", Opt_pquota),
132 fsparam_flag("uqnoenforce", Opt_uqnoenforce),
133 fsparam_flag("gqnoenforce", Opt_gqnoenforce),
134 fsparam_flag("pqnoenforce", Opt_pqnoenforce),
135 fsparam_flag("qnoenforce", Opt_qnoenforce),
136 fsparam_flag("discard", Opt_discard),
137 fsparam_flag("nodiscard", Opt_nodiscard),
138 fsparam_flag("dax", Opt_dax),
139 fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
140 {}
141 };
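/*
 * Note the two "dax" entries above: a bare "dax" matches the flag entry
 * (Opt_dax) and is treated as dax=always in xfs_fc_parse_param(), while
 * "dax=inode|always|never" matches the enum entry (Opt_dax_enum).
 */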
142
143 struct proc_xfs_info {
144 uint64_t flag;
145 char *str;
146 };
147
148 static int
149 xfs_fs_show_options(
150 struct seq_file *m,
151 struct dentry *root)
152 {
153 static struct proc_xfs_info xfs_info_set[] = {
154 /* the few simple ones we can get from the mount struct */
155 { XFS_MOUNT_IKEEP, ",ikeep" },
156 { XFS_MOUNT_WSYNC, ",wsync" },
157 { XFS_MOUNT_NOALIGN, ",noalign" },
158 { XFS_MOUNT_SWALLOC, ",swalloc" },
159 { XFS_MOUNT_NOUUID, ",nouuid" },
160 { XFS_MOUNT_NORECOVERY, ",norecovery" },
161 { XFS_MOUNT_ATTR2, ",attr2" },
162 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
163 { XFS_MOUNT_GRPID, ",grpid" },
164 { XFS_MOUNT_DISCARD, ",discard" },
165 { XFS_MOUNT_LARGEIO, ",largeio" },
166 { XFS_MOUNT_DAX_ALWAYS, ",dax=always" },
167 { XFS_MOUNT_DAX_NEVER, ",dax=never" },
168 { 0, NULL }
169 };
170 struct xfs_mount *mp = XFS_M(root->d_sb);
171 struct proc_xfs_info *xfs_infop;
172
173 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
174 if (mp->m_flags & xfs_infop->flag)
175 seq_puts(m, xfs_infop->str);
176 }
177
178 seq_printf(m, ",inode%d",
179 (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
180
181 if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
182 seq_printf(m, ",allocsize=%dk",
183 (1 << mp->m_allocsize_log) >> 10);
184
185 if (mp->m_logbufs > 0)
186 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
187 if (mp->m_logbsize > 0)
188 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
189
190 if (mp->m_logname)
191 seq_show_option(m, "logdev", mp->m_logname);
192 if (mp->m_rtname)
193 seq_show_option(m, "rtdev", mp->m_rtname);
194
195 if (mp->m_dalign > 0)
196 seq_printf(m, ",sunit=%d",
197 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
198 if (mp->m_swidth > 0)
199 seq_printf(m, ",swidth=%d",
200 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
201
202 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
203 seq_puts(m, ",usrquota");
204 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
205 seq_puts(m, ",uqnoenforce");
206
207 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
208 if (mp->m_qflags & XFS_PQUOTA_ENFD)
209 seq_puts(m, ",prjquota");
210 else
211 seq_puts(m, ",pqnoenforce");
212 }
213 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
214 if (mp->m_qflags & XFS_GQUOTA_ENFD)
215 seq_puts(m, ",grpquota");
216 else
217 seq_puts(m, ",gqnoenforce");
218 }
219
220 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
221 seq_puts(m, ",noquota");
222
223 return 0;
224 }
225
226 /*
227 * Set parameters for inode allocation heuristics, taking into account
228 * filesystem size and inode32/inode64 mount options; i.e. specifically
229 * whether or not XFS_MOUNT_SMALL_INUMS is set.
230 *
231 * Inode allocation patterns are altered only if inode32 is requested
232 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
233 * If altered, XFS_MOUNT_32BITINODES is set as well.
234 *
235 * An agcount independent of that in the mount structure is provided
236 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
237 * to the potentially higher ag count.
238 *
239 * Returns the maximum AG index which may contain inodes.
240 */
241 xfs_agnumber_t
242 xfs_set_inode_alloc(
243 struct xfs_mount *mp,
244 xfs_agnumber_t agcount)
245 {
246 xfs_agnumber_t index;
247 xfs_agnumber_t maxagi = 0;
248 xfs_sb_t *sbp = &mp->m_sb;
249 xfs_agnumber_t max_metadata;
250 xfs_agino_t agino;
251 xfs_ino_t ino;
252
253 /*
254 * Calculate how much should be reserved for inodes to meet
255 * the max inode percentage. Used only for inode32.
256 */
257 if (M_IGEO(mp)->maxicount) {
258 uint64_t icount;
259
260 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
261 do_div(icount, 100);
262 icount += sbp->sb_agblocks - 1;
263 do_div(icount, sbp->sb_agblocks);
264 max_metadata = icount;
265 } else {
266 max_metadata = agcount;
267 }
268
269 /* Get the last possible inode in the filesystem */
270 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
271 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
272
273 /*
274 * If user asked for no more than 32-bit inodes, and the fs is
275 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
276 * the allocator to accommodate the request.
277 */
278 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
279 mp->m_flags |= XFS_MOUNT_32BITINODES;
280 else
281 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
282
283 for (index = 0; index < agcount; index++) {
284 struct xfs_perag *pag;
285
286 ino = XFS_AGINO_TO_INO(mp, index, agino);
287
288 pag = xfs_perag_get(mp, index);
289
290 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
291 if (ino > XFS_MAXINUMBER_32) {
292 pag->pagi_inodeok = 0;
293 pag->pagf_metadata = 0;
294 } else {
295 pag->pagi_inodeok = 1;
296 maxagi++;
297 if (index < max_metadata)
298 pag->pagf_metadata = 1;
299 else
300 pag->pagf_metadata = 0;
301 }
302 } else {
303 pag->pagi_inodeok = 1;
304 pag->pagf_metadata = 0;
305 }
306
307 xfs_perag_put(pag);
308 }
309
310 return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
311 }
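/*
 * Worked example for the inode32 heuristic above: on a filesystem large
 * enough that the last possible inode number does not fit in 32 bits, an
 * inode32 mount sets XFS_MOUNT_32BITINODES, leaves pagi_inodeok set only
 * on the leading AGs whose inode numbers still fit in 32 bits, and flags
 * the first max_metadata of those as metadata-preferred (pagf_metadata).
 * An inode64 mount leaves every AG available for inode allocation.
 */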
312
313 STATIC int
314 xfs_blkdev_get(
315 xfs_mount_t *mp,
316 const char *name,
317 struct block_device **bdevp)
318 {
319 int error = 0;
320
321 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
322 mp);
323 if (IS_ERR(*bdevp)) {
324 error = PTR_ERR(*bdevp);
325 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
326 }
327
328 return error;
329 }
330
331 STATIC void
332 xfs_blkdev_put(
333 struct block_device *bdev)
334 {
335 if (bdev)
336 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
337 }
338
339 void
340 xfs_blkdev_issue_flush(
341 xfs_buftarg_t *buftarg)
342 {
343 blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
344 }
345
346 STATIC void
347 xfs_close_devices(
348 struct xfs_mount *mp)
349 {
350 struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
351
352 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
353 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
354 struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
355
356 xfs_free_buftarg(mp->m_logdev_targp);
357 xfs_blkdev_put(logdev);
358 fs_put_dax(dax_logdev);
359 }
360 if (mp->m_rtdev_targp) {
361 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
362 struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
363
364 xfs_free_buftarg(mp->m_rtdev_targp);
365 xfs_blkdev_put(rtdev);
366 fs_put_dax(dax_rtdev);
367 }
368 xfs_free_buftarg(mp->m_ddev_targp);
369 fs_put_dax(dax_ddev);
370 }
371
372 /*
373 * The file system configurations are:
374 * (1) device (partition) with data and internal log
375 * (2) logical volume with data and log subvolumes.
376 * (3) logical volume with data, log, and realtime subvolumes.
377 *
378 * We only have to handle opening the log and realtime volumes here if
379 * they are present. The data subvolume has already been opened by
380 * get_sb_bdev() and is stored in sb->s_bdev.
381 */
382 STATIC int
383 xfs_open_devices(
384 struct xfs_mount *mp)
385 {
386 struct block_device *ddev = mp->m_super->s_bdev;
387 struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
388 struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
389 struct block_device *logdev = NULL, *rtdev = NULL;
390 int error;
391
392 /*
393 * Open real time and log devices - order is important.
394 */
395 if (mp->m_logname) {
396 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
397 if (error)
398 goto out;
399 dax_logdev = fs_dax_get_by_bdev(logdev);
400 }
401
402 if (mp->m_rtname) {
403 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
404 if (error)
405 goto out_close_logdev;
406
407 if (rtdev == ddev || rtdev == logdev) {
408 xfs_warn(mp,
409 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
410 error = -EINVAL;
411 goto out_close_rtdev;
412 }
413 dax_rtdev = fs_dax_get_by_bdev(rtdev);
414 }
415
416 /*
417 * Setup xfs_mount buffer target pointers
418 */
419 error = -ENOMEM;
420 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
421 if (!mp->m_ddev_targp)
422 goto out_close_rtdev;
423
424 if (rtdev) {
425 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
426 if (!mp->m_rtdev_targp)
427 goto out_free_ddev_targ;
428 }
429
430 if (logdev && logdev != ddev) {
431 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
432 if (!mp->m_logdev_targp)
433 goto out_free_rtdev_targ;
434 } else {
435 mp->m_logdev_targp = mp->m_ddev_targp;
436 }
437
438 return 0;
439
440 out_free_rtdev_targ:
441 if (mp->m_rtdev_targp)
442 xfs_free_buftarg(mp->m_rtdev_targp);
443 out_free_ddev_targ:
444 xfs_free_buftarg(mp->m_ddev_targp);
445 out_close_rtdev:
446 xfs_blkdev_put(rtdev);
447 fs_put_dax(dax_rtdev);
448 out_close_logdev:
449 if (logdev && logdev != ddev) {
450 xfs_blkdev_put(logdev);
451 fs_put_dax(dax_logdev);
452 }
453 out:
454 fs_put_dax(dax_ddev);
455 return error;
456 }
457
458 /*
459 * Setup xfs_mount buffer target pointers based on superblock
460 */
461 STATIC int
462 xfs_setup_devices(
463 struct xfs_mount *mp)
464 {
465 int error;
466
467 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
468 if (error)
469 return error;
470
471 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
472 unsigned int log_sector_size = BBSIZE;
473
474 if (xfs_sb_version_hassector(&mp->m_sb))
475 log_sector_size = mp->m_sb.sb_logsectsize;
476 error = xfs_setsize_buftarg(mp->m_logdev_targp,
477 log_sector_size);
478 if (error)
479 return error;
480 }
481 if (mp->m_rtdev_targp) {
482 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
483 mp->m_sb.sb_sectsize);
484 if (error)
485 return error;
486 }
487
488 return 0;
489 }
490
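/*
 * Create the per-mount workqueues. All of them are WQ_FREEZABLE so that
 * queued work is held off while the filesystem is frozen; all but the
 * sync workqueue are also WQ_MEM_RECLAIM, guaranteeing a rescuer thread
 * so forward progress can be made under memory pressure.
 */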
491 STATIC int
492 xfs_init_mount_workqueues(
493 struct xfs_mount *mp)
494 {
495 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
496 WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
497 if (!mp->m_buf_workqueue)
498 goto out;
499
500 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
501 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
502 if (!mp->m_unwritten_workqueue)
503 goto out_destroy_buf;
504
505 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
506 WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
507 0, mp->m_super->s_id);
508 if (!mp->m_cil_workqueue)
509 goto out_destroy_unwritten;
510
511 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
512 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
513 if (!mp->m_reclaim_workqueue)
514 goto out_destroy_cil;
515
516 mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
517 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
518 if (!mp->m_eofblocks_workqueue)
519 goto out_destroy_reclaim;
520
521 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
522 mp->m_super->s_id);
523 if (!mp->m_sync_workqueue)
524 goto out_destroy_eofb;
525
526 return 0;
527
528 out_destroy_eofb:
529 destroy_workqueue(mp->m_eofblocks_workqueue);
530 out_destroy_reclaim:
531 destroy_workqueue(mp->m_reclaim_workqueue);
532 out_destroy_cil:
533 destroy_workqueue(mp->m_cil_workqueue);
534 out_destroy_unwritten:
535 destroy_workqueue(mp->m_unwritten_workqueue);
536 out_destroy_buf:
537 destroy_workqueue(mp->m_buf_workqueue);
538 out:
539 return -ENOMEM;
540 }
541
542 STATIC void
543 xfs_destroy_mount_workqueues(
544 struct xfs_mount *mp)
545 {
546 destroy_workqueue(mp->m_sync_workqueue);
547 destroy_workqueue(mp->m_eofblocks_workqueue);
548 destroy_workqueue(mp->m_reclaim_workqueue);
549 destroy_workqueue(mp->m_cil_workqueue);
550 destroy_workqueue(mp->m_unwritten_workqueue);
551 destroy_workqueue(mp->m_buf_workqueue);
552 }
553
554 static void
555 xfs_flush_inodes_worker(
556 struct work_struct *work)
557 {
558 struct xfs_mount *mp = container_of(work, struct xfs_mount,
559 m_flush_inodes_work);
560 struct super_block *sb = mp->m_super;
561
562 if (down_read_trylock(&sb->s_umount)) {
563 sync_inodes_sb(sb);
564 up_read(&sb->s_umount);
565 }
566 }
567
568 /*
569 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
570 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
571 * for IO to complete so that we effectively throttle multiple callers to the
572 * rate at which IO is completing.
573 */
574 void
575 xfs_flush_inodes(
576 struct xfs_mount *mp)
577 {
578 /*
579 * If flush_work() returns true then that means we waited for a flush
580 * which was already in progress. Don't bother running another scan.
581 */
582 if (flush_work(&mp->m_flush_inodes_work))
583 return;
584
585 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
586 flush_work(&mp->m_flush_inodes_work);
587 }
588
589 /* Catch misguided souls that try to use this interface on XFS */
590 STATIC struct inode *
591 xfs_fs_alloc_inode(
592 struct super_block *sb)
593 {
594 BUG();
595 return NULL;
596 }
597
598 #ifdef DEBUG
599 static void
600 xfs_check_delalloc(
601 struct xfs_inode *ip,
602 int whichfork)
603 {
604 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
605 struct xfs_bmbt_irec got;
606 struct xfs_iext_cursor icur;
607
608 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
609 return;
610 do {
611 if (isnullstartblock(got.br_startblock)) {
612 xfs_warn(ip->i_mount,
613 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
614 ip->i_ino,
615 whichfork == XFS_DATA_FORK ? "data" : "cow",
616 got.br_startoff, got.br_blockcount);
617 }
618 } while (xfs_iext_next_extent(ifp, &icur, &got));
619 }
620 #else
621 #define xfs_check_delalloc(ip, whichfork) do { } while (0)
622 #endif
623
624 /*
625 * Now that the generic code is guaranteed not to be accessing
626 * the linux inode, we can inactivate and reclaim the inode.
627 */
628 STATIC void
629 xfs_fs_destroy_inode(
630 struct inode *inode)
631 {
632 struct xfs_inode *ip = XFS_I(inode);
633
634 trace_xfs_destroy_inode(ip);
635
636 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
637 XFS_STATS_INC(ip->i_mount, vn_rele);
638 XFS_STATS_INC(ip->i_mount, vn_remove);
639
640 xfs_inactive(ip);
641
642 if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
643 xfs_check_delalloc(ip, XFS_DATA_FORK);
644 xfs_check_delalloc(ip, XFS_COW_FORK);
645 ASSERT(0);
646 }
647
648 XFS_STATS_INC(ip->i_mount, vn_reclaim);
649
650 /*
651 * We should never get here with one of the reclaim flags already set.
652 */
653 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
654 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
655
656 /*
657 * We always use background reclaim here because even if the
658 * inode is clean, it still may be under IO and hence we have
659 * to take the flush lock. The background reclaim path handles
660 * this more efficiently than we can here, so simply let background
661 * reclaim tear down all inodes.
662 */
663 xfs_inode_set_reclaim_tag(ip);
664 }
665
666 static void
667 xfs_fs_dirty_inode(
668 struct inode *inode,
669 int flag)
670 {
671 struct xfs_inode *ip = XFS_I(inode);
672 struct xfs_mount *mp = ip->i_mount;
673 struct xfs_trans *tp;
674
675 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
676 return;
677 if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
678 return;
679
680 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
681 return;
682 xfs_ilock(ip, XFS_ILOCK_EXCL);
683 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
684 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
685 xfs_trans_commit(tp);
686 }
687
688 /*
689 * Slab object creation initialisation for the XFS inode.
690 * This covers only the idempotent fields in the XFS inode;
691 * all other fields need to be initialised on allocation
692 * from the slab. This avoids the need to repeatedly initialise
693 * fields in the xfs inode that are left in the initialised state
694 * when freeing the inode.
695 */
696 STATIC void
697 xfs_fs_inode_init_once(
698 void *inode)
699 {
700 struct xfs_inode *ip = inode;
701
702 memset(ip, 0, sizeof(struct xfs_inode));
703
704 /* vfs inode */
705 inode_init_once(VFS_I(ip));
706
707 /* xfs inode */
708 atomic_set(&ip->i_pincount, 0);
709 spin_lock_init(&ip->i_flags_lock);
710
711 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
712 "xfsino", ip->i_ino);
713 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
714 "xfsino", ip->i_ino);
715 }
716
717 /*
718 * We do an unlocked check for XFS_IDONTCACHE here because we are already
719 * serialised against cache hits here via the inode->i_lock and igrab() in
720 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
721 * racing with us, and it avoids needing to grab a spinlock here for every inode
722 * we drop the final reference on.
723 */
724 STATIC int
725 xfs_fs_drop_inode(
726 struct inode *inode)
727 {
728 struct xfs_inode *ip = XFS_I(inode);
729
730 /*
731 * If this unlinked inode is in the middle of recovery, don't
732 * drop the inode just yet; log recovery will take care of
733 * that. See the comment for this inode flag.
734 */
735 if (ip->i_flags & XFS_IRECOVERY) {
736 ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
737 return 0;
738 }
739
740 return generic_drop_inode(inode);
741 }
742
743 static void
744 xfs_mount_free(
745 struct xfs_mount *mp)
746 {
747 kfree(mp->m_rtname);
748 kfree(mp->m_logname);
749 kmem_free(mp);
750 }
751
752 STATIC int
753 xfs_fs_sync_fs(
754 struct super_block *sb,
755 int wait)
756 {
757 struct xfs_mount *mp = XFS_M(sb);
758
759 /*
760 * Doing anything during the async pass would be counterproductive.
761 */
762 if (!wait)
763 return 0;
764
765 xfs_log_force(mp, XFS_LOG_SYNC);
766 if (laptop_mode) {
767 /*
768 * The disk must be active because we're syncing.
769 * We schedule log work now (now that the disk is
770 * active) instead of later (when it might not be).
771 */
772 flush_delayed_work(&mp->m_log->l_work);
773 }
774
775 return 0;
776 }
777
778 STATIC int
779 xfs_fs_statfs(
780 struct dentry *dentry,
781 struct kstatfs *statp)
782 {
783 struct xfs_mount *mp = XFS_M(dentry->d_sb);
784 xfs_sb_t *sbp = &mp->m_sb;
785 struct xfs_inode *ip = XFS_I(d_inode(dentry));
786 uint64_t fakeinos, id;
787 uint64_t icount;
788 uint64_t ifree;
789 uint64_t fdblocks;
790 xfs_extlen_t lsize;
791 int64_t ffree;
792
793 statp->f_type = XFS_SUPER_MAGIC;
794 statp->f_namelen = MAXNAMELEN - 1;
795
796 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
797 statp->f_fsid.val[0] = (u32)id;
798 statp->f_fsid.val[1] = (u32)(id >> 32);
799
800 icount = percpu_counter_sum(&mp->m_icount);
801 ifree = percpu_counter_sum(&mp->m_ifree);
802 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
803
804 spin_lock(&mp->m_sb_lock);
805 statp->f_bsize = sbp->sb_blocksize;
806 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
807 statp->f_blocks = sbp->sb_dblocks - lsize;
808 spin_unlock(&mp->m_sb_lock);
809
810 /* make sure statp->f_bfree does not underflow */
811 statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
812 statp->f_bavail = statp->f_bfree;
813
814 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
815 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
816 if (M_IGEO(mp)->maxicount)
817 statp->f_files = min_t(typeof(statp->f_files),
818 statp->f_files,
819 M_IGEO(mp)->maxicount);
820
821 /* If sb_icount overshot maxicount, report actual allocation */
822 statp->f_files = max_t(typeof(statp->f_files),
823 statp->f_files,
824 sbp->sb_icount);
825
826 /* make sure statp->f_ffree does not underflow */
827 ffree = statp->f_files - (icount - ifree);
828 statp->f_ffree = max_t(int64_t, ffree, 0);
829
830
831 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
832 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
833 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
834 xfs_qm_statvfs(ip, statp);
835
836 if (XFS_IS_REALTIME_MOUNT(mp) &&
837 (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
838 statp->f_blocks = sbp->sb_rblocks;
839 statp->f_bavail = statp->f_bfree =
840 sbp->sb_frextents * sbp->sb_rextsize;
841 }
842
843 return 0;
844 }
845
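/*
 * Stash the current reserve pool size and drain the pool back into the
 * free space counters (xfs_reserve_blocks() with a target of zero) so the
 * on-disk free block count is accurate while frozen or read-only.
 * xfs_restore_resvblks() below performs the inverse on thaw/remount-rw.
 */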
846 STATIC void
847 xfs_save_resvblks(struct xfs_mount *mp)
848 {
849 uint64_t resblks = 0;
850
851 mp->m_resblks_save = mp->m_resblks;
852 xfs_reserve_blocks(mp, &resblks, NULL);
853 }
854
855 STATIC void
856 xfs_restore_resvblks(struct xfs_mount *mp)
857 {
858 uint64_t resblks;
859
860 if (mp->m_resblks_save) {
861 resblks = mp->m_resblks_save;
862 mp->m_resblks_save = 0;
863 } else
864 resblks = xfs_default_resblks(mp);
865
866 xfs_reserve_blocks(mp, &resblks, NULL);
867 }
868
869 /*
870 * Trigger writeback of all the dirty metadata in the file system.
871 *
872 * This ensures that the metadata is written to its location on disk rather
873 * than just existing in transactions in the log. This means after a quiesce
874 * there is no log replay required to write the inodes to disk - this is the
875 * primary difference between a sync and a quiesce.
876 *
877 * We cancel log work early here to ensure all transactions the log worker may
878 * run have finished before we clean up and log the superblock and write an
879 * unmount record. The unfreeze process is responsible for restarting the log
880 * worker correctly.
881 */
882 void
883 xfs_quiesce_attr(
884 struct xfs_mount *mp)
885 {
886 int error = 0;
887
888 cancel_delayed_work_sync(&mp->m_log->l_work);
889
890 /* force the log to unpin objects from the now complete transactions */
891 xfs_log_force(mp, XFS_LOG_SYNC);
892
893
894 /* Push the superblock and write an unmount record */
895 error = xfs_log_sbcount(mp);
896 if (error)
897 xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
898 "Frozen image may not be consistent.");
899 xfs_log_quiesce(mp);
900 }
901
902 /*
903 * Second stage of a freeze. The data is already frozen so we only
904 * need to take care of the metadata. Once that's done sync the superblock
905 * to the log to dirty it in case of a crash while frozen. This ensures that we
906 * will recover the unlinked inode lists on the next mount.
907 */
908 STATIC int
909 xfs_fs_freeze(
910 struct super_block *sb)
911 {
912 struct xfs_mount *mp = XFS_M(sb);
913 unsigned int flags;
914 int ret;
915
916 /*
917 * The filesystem is now frozen far enough that memory reclaim
918 * cannot safely operate on the filesystem. Hence we need to
919 * set a GFP_NOFS context here to avoid recursion deadlocks.
920 */
921 flags = memalloc_nofs_save();
922 xfs_stop_block_reaping(mp);
923 xfs_save_resvblks(mp);
924 xfs_quiesce_attr(mp);
925 ret = xfs_sync_sb(mp, true);
926 memalloc_nofs_restore(flags);
927 return ret;
928 }
929
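/*
 * Undo xfs_fs_freeze(): refill the reserve block pool, restart the log
 * worker that xfs_quiesce_attr() cancelled, and resume background
 * speculative preallocation reaping.
 */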
930 STATIC int
931 xfs_fs_unfreeze(
932 struct super_block *sb)
933 {
934 struct xfs_mount *mp = XFS_M(sb);
935
936 xfs_restore_resvblks(mp);
937 xfs_log_work_queue(mp);
938 xfs_start_block_reaping(mp);
939 return 0;
940 }
941
942 /*
943 * This function fills in xfs_mount_t fields based on mount args.
944 * Note: the superblock _has_ now been read in.
945 */
946 STATIC int
947 xfs_finish_flags(
948 struct xfs_mount *mp)
949 {
950 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
951
952 /* Fail a mount where the logbuf is smaller than the log stripe */
953 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
954 if (mp->m_logbsize <= 0 &&
955 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
956 mp->m_logbsize = mp->m_sb.sb_logsunit;
957 } else if (mp->m_logbsize > 0 &&
958 mp->m_logbsize < mp->m_sb.sb_logsunit) {
959 xfs_warn(mp,
960 "logbuf size must be greater than or equal to log stripe size");
961 return -EINVAL;
962 }
963 } else {
964 /* Fail a mount if the logbuf is larger than 32K */
965 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
966 xfs_warn(mp,
967 "logbuf size for version 1 logs must be 16K or 32K");
968 return -EINVAL;
969 }
970 }
971
972 /*
973 * V5 filesystems always use attr2 format for attributes.
974 */
975 if (xfs_sb_version_hascrc(&mp->m_sb) &&
976 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
977 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
978 "attr2 is always enabled for V5 filesystems.");
979 return -EINVAL;
980 }
981
982 /*
983 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
984 * told by noattr2 to turn it off
985 */
986 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
987 !(mp->m_flags & XFS_MOUNT_NOATTR2))
988 mp->m_flags |= XFS_MOUNT_ATTR2;
989
990 /*
991 * prohibit r/w mounts of read-only filesystems
992 */
993 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
994 xfs_warn(mp,
995 "cannot mount a read-only filesystem as read-write");
996 return -EROFS;
997 }
998
999 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1000 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1001 !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1002 xfs_warn(mp,
1003 "Super block does not support project and group quota together");
1004 return -EINVAL;
1005 }
1006
1007 return 0;
1008 }
1009
1010 static int
1011 xfs_init_percpu_counters(
1012 struct xfs_mount *mp)
1013 {
1014 int error;
1015
1016 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1017 if (error)
1018 return -ENOMEM;
1019
1020 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1021 if (error)
1022 goto free_icount;
1023
1024 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1025 if (error)
1026 goto free_ifree;
1027
1028 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1029 if (error)
1030 goto free_fdblocks;
1031
1032 return 0;
1033
1034 free_fdblocks:
1035 percpu_counter_destroy(&mp->m_fdblocks);
1036 free_ifree:
1037 percpu_counter_destroy(&mp->m_ifree);
1038 free_icount:
1039 percpu_counter_destroy(&mp->m_icount);
1040 return -ENOMEM;
1041 }
1042
1043 void
1044 xfs_reinit_percpu_counters(
1045 struct xfs_mount *mp)
1046 {
1047 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1048 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1049 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1050 }
1051
1052 static void
1053 xfs_destroy_percpu_counters(
1054 struct xfs_mount *mp)
1055 {
1056 percpu_counter_destroy(&mp->m_icount);
1057 percpu_counter_destroy(&mp->m_ifree);
1058 percpu_counter_destroy(&mp->m_fdblocks);
1059 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1060 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1061 percpu_counter_destroy(&mp->m_delalloc_blks);
1062 }
1063
1064 static void
1065 xfs_fs_put_super(
1066 struct super_block *sb)
1067 {
1068 struct xfs_mount *mp = XFS_M(sb);
1069
1070 /* if ->fill_super failed, we have no mount to tear down */
1071 if (!sb->s_fs_info)
1072 return;
1073
1074 xfs_notice(mp, "Unmounting Filesystem");
1075 xfs_filestream_unmount(mp);
1076 xfs_unmountfs(mp);
1077
1078 xfs_freesb(mp);
1079 free_percpu(mp->m_stats.xs_stats);
1080 xfs_destroy_percpu_counters(mp);
1081 xfs_destroy_mount_workqueues(mp);
1082 xfs_close_devices(mp);
1083
1084 sb->s_fs_info = NULL;
1085 xfs_mount_free(mp);
1086 }
1087
1088 static long
1089 xfs_fs_nr_cached_objects(
1090 struct super_block *sb,
1091 struct shrink_control *sc)
1092 {
1093 /* Paranoia: catch incorrect calls during mount setup or teardown */
1094 if (WARN_ON_ONCE(!sb->s_fs_info))
1095 return 0;
1096 return xfs_reclaim_inodes_count(XFS_M(sb));
1097 }
1098
1099 static long
1100 xfs_fs_free_cached_objects(
1101 struct super_block *sb,
1102 struct shrink_control *sc)
1103 {
1104 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1105 }
1106
1107 static const struct super_operations xfs_super_operations = {
1108 .alloc_inode = xfs_fs_alloc_inode,
1109 .destroy_inode = xfs_fs_destroy_inode,
1110 .dirty_inode = xfs_fs_dirty_inode,
1111 .drop_inode = xfs_fs_drop_inode,
1112 .put_super = xfs_fs_put_super,
1113 .sync_fs = xfs_fs_sync_fs,
1114 .freeze_fs = xfs_fs_freeze,
1115 .unfreeze_fs = xfs_fs_unfreeze,
1116 .statfs = xfs_fs_statfs,
1117 .show_options = xfs_fs_show_options,
1118 .nr_cached_objects = xfs_fs_nr_cached_objects,
1119 .free_cached_objects = xfs_fs_free_cached_objects,
1120 };
1121
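/*
 * Parse an integer mount option value with an optional binary suffix:
 * K/k, M/m or G/g shift the parsed value left by 10, 20 or 30 bits
 * respectively, so e.g. "logbsize=32k" yields 32 << 10 = 32768.
 */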
1122 static int
1123 suffix_kstrtoint(
1124 const char *s,
1125 unsigned int base,
1126 int *res)
1127 {
1128 int last, shift_left_factor = 0, _res;
1129 char *value;
1130 int ret = 0;
1131
1132 value = kstrdup(s, GFP_KERNEL);
1133 if (!value)
1134 return -ENOMEM;
1135
1136 last = strlen(value) - 1;
1137 if (value[last] == 'K' || value[last] == 'k') {
1138 shift_left_factor = 10;
1139 value[last] = '\0';
1140 }
1141 if (value[last] == 'M' || value[last] == 'm') {
1142 shift_left_factor = 20;
1143 value[last] = '\0';
1144 }
1145 if (value[last] == 'G' || value[last] == 'g') {
1146 shift_left_factor = 30;
1147 value[last] = '\0';
1148 }
1149
1150 if (kstrtoint(value, base, &_res))
1151 ret = -EINVAL;
1152 kfree(value);
1153 *res = _res << shift_left_factor;
1154 return ret;
1155 }
1156
1157 /*
1158 * Set mount state from a mount option.
1159 *
1160 * NOTE: mp->m_super is NULL here!
1161 */
1162 static int
1163 xfs_fc_parse_param(
1164 struct fs_context *fc,
1165 struct fs_parameter *param)
1166 {
1167 struct xfs_mount *mp = fc->s_fs_info;
1168 struct fs_parse_result result;
1169 int size = 0;
1170 int opt;
1171
1172 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1173 if (opt < 0)
1174 return opt;
1175
1176 switch (opt) {
1177 case Opt_logbufs:
1178 mp->m_logbufs = result.uint_32;
1179 return 0;
1180 case Opt_logbsize:
1181 if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
1182 return -EINVAL;
1183 return 0;
1184 case Opt_logdev:
1185 kfree(mp->m_logname);
1186 mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1187 if (!mp->m_logname)
1188 return -ENOMEM;
1189 return 0;
1190 case Opt_rtdev:
1191 kfree(mp->m_rtname);
1192 mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1193 if (!mp->m_rtname)
1194 return -ENOMEM;
1195 return 0;
1196 case Opt_allocsize:
1197 if (suffix_kstrtoint(param->string, 10, &size))
1198 return -EINVAL;
1199 mp->m_allocsize_log = ffs(size) - 1;
1200 mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1201 return 0;
1202 case Opt_grpid:
1203 case Opt_bsdgroups:
1204 mp->m_flags |= XFS_MOUNT_GRPID;
1205 return 0;
1206 case Opt_nogrpid:
1207 case Opt_sysvgroups:
1208 mp->m_flags &= ~XFS_MOUNT_GRPID;
1209 return 0;
1210 case Opt_wsync:
1211 mp->m_flags |= XFS_MOUNT_WSYNC;
1212 return 0;
1213 case Opt_norecovery:
1214 mp->m_flags |= XFS_MOUNT_NORECOVERY;
1215 return 0;
1216 case Opt_noalign:
1217 mp->m_flags |= XFS_MOUNT_NOALIGN;
1218 return 0;
1219 case Opt_swalloc:
1220 mp->m_flags |= XFS_MOUNT_SWALLOC;
1221 return 0;
1222 case Opt_sunit:
1223 mp->m_dalign = result.uint_32;
1224 return 0;
1225 case Opt_swidth:
1226 mp->m_swidth = result.uint_32;
1227 return 0;
1228 case Opt_inode32:
1229 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1230 return 0;
1231 case Opt_inode64:
1232 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1233 return 0;
1234 case Opt_nouuid:
1235 mp->m_flags |= XFS_MOUNT_NOUUID;
1236 return 0;
1237 case Opt_ikeep:
1238 mp->m_flags |= XFS_MOUNT_IKEEP;
1239 return 0;
1240 case Opt_noikeep:
1241 mp->m_flags &= ~XFS_MOUNT_IKEEP;
1242 return 0;
1243 case Opt_largeio:
1244 mp->m_flags |= XFS_MOUNT_LARGEIO;
1245 return 0;
1246 case Opt_nolargeio:
1247 mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1248 return 0;
1249 case Opt_attr2:
1250 mp->m_flags |= XFS_MOUNT_ATTR2;
1251 return 0;
1252 case Opt_noattr2:
1253 mp->m_flags &= ~XFS_MOUNT_ATTR2;
1254 mp->m_flags |= XFS_MOUNT_NOATTR2;
1255 return 0;
1256 case Opt_filestreams:
1257 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1258 return 0;
1259 case Opt_noquota:
1260 mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1261 mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1262 mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1263 return 0;
1264 case Opt_quota:
1265 case Opt_uquota:
1266 case Opt_usrquota:
1267 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1268 XFS_UQUOTA_ENFD);
1269 return 0;
1270 case Opt_qnoenforce:
1271 case Opt_uqnoenforce:
1272 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1273 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1274 return 0;
1275 case Opt_pquota:
1276 case Opt_prjquota:
1277 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1278 XFS_PQUOTA_ENFD);
1279 return 0;
1280 case Opt_pqnoenforce:
1281 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1282 mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1283 return 0;
1284 case Opt_gquota:
1285 case Opt_grpquota:
1286 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1287 XFS_GQUOTA_ENFD);
1288 return 0;
1289 case Opt_gqnoenforce:
1290 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1291 mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1292 return 0;
1293 case Opt_discard:
1294 mp->m_flags |= XFS_MOUNT_DISCARD;
1295 return 0;
1296 case Opt_nodiscard:
1297 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1298 return 0;
1299 #ifdef CONFIG_FS_DAX
1300 case Opt_dax:
1301 xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
1302 return 0;
1303 case Opt_dax_enum:
1304 xfs_mount_set_dax_mode(mp, result.uint_32);
1305 return 0;
1306 #endif
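	/*
	 * Without CONFIG_FS_DAX the Opt_dax cases above are compiled out,
	 * so "dax" options fall through to the default case below and the
	 * mount is rejected.
	 */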
1307 default:
1308 xfs_warn(mp, "unknown mount option [%s].", param->key);
1309 return -EINVAL;
1310 }
1311
1312 return 0;
1313 }
1314
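/*
 * Cross-check the parsed mount options for internal consistency, e.g.
 * norecovery requires read-only, sunit/swidth must be given together
 * with swidth a multiple of sunit (sunit=64,swidth=128 is valid while
 * swidth=96 is not), and logbufs/logbsize must be in range.
 */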
1315 static int
1316 xfs_fc_validate_params(
1317 struct xfs_mount *mp)
1318 {
1319 /*
1320 * no recovery flag requires a read-only mount
1321 */
1322 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1323 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1324 xfs_warn(mp, "no-recovery mounts must be read-only.");
1325 return -EINVAL;
1326 }
1327
1328 if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1329 (mp->m_dalign || mp->m_swidth)) {
1330 xfs_warn(mp,
1331 "sunit and swidth options incompatible with the noalign option");
1332 return -EINVAL;
1333 }
1334
1335 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1336 xfs_warn(mp, "quota support not available in this kernel.");
1337 return -EINVAL;
1338 }
1339
1340 if ((mp->m_dalign && !mp->m_swidth) ||
1341 (!mp->m_dalign && mp->m_swidth)) {
1342 xfs_warn(mp, "sunit and swidth must be specified together");
1343 return -EINVAL;
1344 }
1345
1346 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1347 xfs_warn(mp,
1348 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1349 mp->m_swidth, mp->m_dalign);
1350 return -EINVAL;
1351 }
1352
1353 if (mp->m_logbufs != -1 &&
1354 mp->m_logbufs != 0 &&
1355 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1356 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1357 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1358 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1359 return -EINVAL;
1360 }
1361
1362 if (mp->m_logbsize != -1 &&
1363 mp->m_logbsize != 0 &&
1364 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1365 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1366 !is_power_of_2(mp->m_logbsize))) {
1367 xfs_warn(mp,
1368 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1369 mp->m_logbsize);
1370 return -EINVAL;
1371 }
1372
1373 if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1374 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1375 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1376 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1377 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1378 return -EINVAL;
1379 }
1380
1381 return 0;
1382 }
1383
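/*
 * Set up the VFS superblock for a new mount: open the log/realtime
 * devices, read and validate the on-disk superblock, then run the full
 * mount path and instantiate the root dentry. On failure every step is
 * unwound and sb->s_fs_info is cleared so xfs_fs_put_super() knows there
 * is no mount to tear down.
 */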
1384 static int
1385 xfs_fc_fill_super(
1386 struct super_block *sb,
1387 struct fs_context *fc)
1388 {
1389 struct xfs_mount *mp = sb->s_fs_info;
1390 struct inode *root;
1391 int flags = 0, error;
1392
1393 mp->m_super = sb;
1394
1395 error = xfs_fc_validate_params(mp);
1396 if (error)
1397 goto out_free_names;
1398
1399 sb_min_blocksize(sb, BBSIZE);
1400 sb->s_xattr = xfs_xattr_handlers;
1401 sb->s_export_op = &xfs_export_operations;
1402 #ifdef CONFIG_XFS_QUOTA
1403 sb->s_qcop = &xfs_quotactl_operations;
1404 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1405 #endif
1406 sb->s_op = &xfs_super_operations;
1407
1408 /*
1409 * Delay mount work if the debug hook is set. This is debug
1410 * instrumentation to coordinate simulation of xfs mount failures with
1411 * VFS superblock operations.
1412 */
1413 if (xfs_globals.mount_delay) {
1414 xfs_notice(mp, "Delaying mount for %d seconds.",
1415 xfs_globals.mount_delay);
1416 msleep(xfs_globals.mount_delay * 1000);
1417 }
1418
1419 if (fc->sb_flags & SB_SILENT)
1420 flags |= XFS_MFSI_QUIET;
1421
1422 error = xfs_open_devices(mp);
1423 if (error)
1424 goto out_free_names;
1425
1426 error = xfs_init_mount_workqueues(mp);
1427 if (error)
1428 goto out_close_devices;
1429
1430 error = xfs_init_percpu_counters(mp);
1431 if (error)
1432 goto out_destroy_workqueues;
1433
1434 /* Allocate stats memory before we do operations that might use it */
1435 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1436 if (!mp->m_stats.xs_stats) {
1437 error = -ENOMEM;
1438 goto out_destroy_counters;
1439 }
1440
1441 error = xfs_readsb(mp, flags);
1442 if (error)
1443 goto out_free_stats;
1444
1445 error = xfs_finish_flags(mp);
1446 if (error)
1447 goto out_free_sb;
1448
1449 error = xfs_setup_devices(mp);
1450 if (error)
1451 goto out_free_sb;
1452
1453 /*
1454 * XFS block mappings use 54 bits to store the logical block offset.
1455 * This should suffice to handle the maximum file size that the VFS
1456 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1457 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1458 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1459 * to check this assertion.
1460 *
1461 * Avoid integer overflow by comparing the maximum bmbt offset to the
1462 * maximum pagecache offset in units of fs blocks.
1463 */
1464 if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
1465 xfs_warn(mp,
1466 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1467 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1468 XFS_MAX_FILEOFF);
1469 error = -EINVAL;
1470 goto out_free_sb;
1471 }
1472
1473 error = xfs_filestream_mount(mp);
1474 if (error)
1475 goto out_free_sb;
1476
1477 /*
1478 * We must configure the block size in the superblock before we run the
1479 * full mount process, as the mount process can look up and cache inodes.
1480 */
1481 sb->s_magic = XFS_SUPER_MAGIC;
1482 sb->s_blocksize = mp->m_sb.sb_blocksize;
1483 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1484 sb->s_maxbytes = MAX_LFS_FILESIZE;
1485 sb->s_max_links = XFS_MAXLINK;
1486 sb->s_time_gran = 1;
1487 sb->s_time_min = S32_MIN;
1488 sb->s_time_max = S32_MAX;
1489 sb->s_iflags |= SB_I_CGROUPWB;
1490
1491 set_posix_acl_flag(sb);
1492
1493 /* version 5 superblocks support inode version counters. */
1494 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1495 sb->s_flags |= SB_I_VERSION;
1496
1497 if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1498 bool rtdev_is_dax = false, datadev_is_dax;
1499
1500 xfs_warn(mp,
1501 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1502
1503 datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1504 sb->s_blocksize);
1505 if (mp->m_rtdev_targp)
1506 rtdev_is_dax = bdev_dax_supported(
1507 mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1508 if (!rtdev_is_dax && !datadev_is_dax) {
1509 xfs_alert(mp,
1510 "DAX unsupported by block device. Turning off DAX.");
1511 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1512 }
1513 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1514 xfs_alert(mp,
1515 "DAX and reflink cannot be used together!");
1516 error = -EINVAL;
1517 goto out_filestream_unmount;
1518 }
1519 }
1520
1521 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1522 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1523
1524 if (!blk_queue_discard(q)) {
1525 xfs_warn(mp, "mounting with \"discard\" option, but "
1526 "the device does not support discard");
1527 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1528 }
1529 }
1530
1531 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1532 if (mp->m_sb.sb_rblocks) {
1533 xfs_alert(mp,
1534 "reflink not compatible with realtime device!");
1535 error = -EINVAL;
1536 goto out_filestream_unmount;
1537 }
1538
1539 if (xfs_globals.always_cow) {
1540 xfs_info(mp, "using DEBUG-only always_cow mode.");
1541 mp->m_always_cow = true;
1542 }
1543 }
1544
1545 if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1546 xfs_alert(mp,
1547 "reverse mapping btree not compatible with realtime device!");
1548 error = -EINVAL;
1549 goto out_filestream_unmount;
1550 }
1551
1552 error = xfs_mountfs(mp);
1553 if (error)
1554 goto out_filestream_unmount;
1555
1556 root = igrab(VFS_I(mp->m_rootip));
1557 if (!root) {
1558 error = -ENOENT;
1559 goto out_unmount;
1560 }
1561 sb->s_root = d_make_root(root);
1562 if (!sb->s_root) {
1563 error = -ENOMEM;
1564 goto out_unmount;
1565 }
1566
1567 return 0;
1568
1569 out_filestream_unmount:
1570 xfs_filestream_unmount(mp);
1571 out_free_sb:
1572 xfs_freesb(mp);
1573 out_free_stats:
1574 free_percpu(mp->m_stats.xs_stats);
1575 out_destroy_counters:
1576 xfs_destroy_percpu_counters(mp);
1577 out_destroy_workqueues:
1578 xfs_destroy_mount_workqueues(mp);
1579 out_close_devices:
1580 xfs_close_devices(mp);
1581 out_free_names:
1582 sb->s_fs_info = NULL;
1583 xfs_mount_free(mp);
1584 return error;
1585
1586 out_unmount:
1587 xfs_filestream_unmount(mp);
1588 xfs_unmountfs(mp);
1589 goto out_free_sb;
1590 }
1591
1592 static int
1593 xfs_fc_get_tree(
1594 struct fs_context *fc)
1595 {
1596 return get_tree_bdev(fc, xfs_fc_fill_super);
1597 }
1598
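/*
 * Transition a mounted filesystem to read-write: forbidden on norecovery
 * mounts and on filesystems with unknown ro-compat features, otherwise
 * write back any pending superblock changes, refill the reserve pool,
 * restart the log worker and recover leftover CoW allocations.
 */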
1599 static int
1600 xfs_remount_rw(
1601 struct xfs_mount *mp)
1602 {
1603 struct xfs_sb *sbp = &mp->m_sb;
1604 int error;
1605
1606 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1607 xfs_warn(mp,
1608 "ro->rw transition prohibited on norecovery mount");
1609 return -EINVAL;
1610 }
1611
1612 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1613 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1614 xfs_warn(mp,
1615 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1616 (sbp->sb_features_ro_compat &
1617 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1618 return -EINVAL;
1619 }
1620
1621 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1622
1623 /*
1624 * If this is the first remount to writeable state we might have some
1625 * superblock changes to update.
1626 */
1627 if (mp->m_update_sb) {
1628 error = xfs_sync_sb(mp, false);
1629 if (error) {
1630 xfs_warn(mp, "failed to write sb changes");
1631 return error;
1632 }
1633 mp->m_update_sb = false;
1634 }
1635
1636 /*
1637 * Fill out the reserve pool if it is empty. Use the stashed value if
1638 * it is non-zero, otherwise go with the default.
1639 */
1640 xfs_restore_resvblks(mp);
1641 xfs_log_work_queue(mp);
1642
1643 /* Recover any CoW blocks that never got remapped. */
1644 error = xfs_reflink_recover_cow(mp);
1645 if (error) {
1646 xfs_err(mp,
1647 "Error %d recovering leftover CoW allocations.", error);
1648 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1649 return error;
1650 }
1651 xfs_start_block_reaping(mp);
1652
1653 /* Create the per-AG metadata reservation pool. */
1654 error = xfs_fs_reserve_ag_blocks(mp);
1655 if (error && error != -ENOSPC)
1656 return error;
1657
1658 return 0;
1659 }
1660
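/*
 * Quiesce a read-write filesystem into a clean read-only state: stop the
 * background scanners, drop CoW and per-AG reservations, drain the
 * reserve pool and push all dirty metadata to disk.
 */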
1661 static int
1662 xfs_remount_ro(
1663 struct xfs_mount *mp)
1664 {
1665 int error;
1666
1667 /*
1668 * Cancel background eofb scanning so it cannot race with the final
1669 * log force+buftarg wait and deadlock the remount.
1670 */
1671 xfs_stop_block_reaping(mp);
1672
1673 /* Get rid of any leftover CoW reservations... */
1674 error = xfs_icache_free_cowblocks(mp, NULL);
1675 if (error) {
1676 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1677 return error;
1678 }
1679
1680 /* Free the per-AG metadata reservation pool. */
1681 error = xfs_fs_unreserve_ag_blocks(mp);
1682 if (error) {
1683 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1684 return error;
1685 }
1686
1687 /*
1688 * Before we sync the metadata, we need to free up the reserve block
1689 * pool so that the used block count in the superblock on disk is
1690 * correct at the end of the remount. Stash the current reserve pool
1691 * size so that if we get remounted rw, we can return it to the same
1692 * size.
1693 */
1694 xfs_save_resvblks(mp);
1695
1696 xfs_quiesce_attr(mp);
1697 mp->m_flags |= XFS_MOUNT_RDONLY;
1698
1699 return 0;
1700 }
1701
1702 /*
1703 * Logically we would return an error here to prevent users from believing
1704 * they might have changed mount options using remount which can't be changed.
1705 *
1706 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1707 * arguments in some cases so we can't blindly reject options, but have to
1708 * check for each specified option if it actually differs from the currently
1709 * set option and only reject it if that's the case.
1710 *
1711 * Until that is implemented we return success for every remount request, and
1712 * silently ignore all options that we can't actually change.
1713 */
1714 static int
1715 xfs_fc_reconfigure(
1716 struct fs_context *fc)
1717 {
1718 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1719 struct xfs_mount *new_mp = fc->s_fs_info;
1720 xfs_sb_t *sbp = &mp->m_sb;
1721 int flags = fc->sb_flags;
1722 int error;
1723
1724 error = xfs_fc_validate_params(new_mp);
1725 if (error)
1726 return error;
1727
1728 sync_filesystem(mp->m_super);
1729
1730 /* inode32 -> inode64 */
1731 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1732 !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1733 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1734 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1735 }
1736
1737 /* inode64 -> inode32 */
1738 if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1739 (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1740 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1741 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1742 }
1743
1744 /* ro -> rw */
1745 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1746 error = xfs_remount_rw(mp);
1747 if (error)
1748 return error;
1749 }
1750
1751 /* rw -> ro */
1752 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1753 error = xfs_remount_ro(mp);
1754 if (error)
1755 return error;
1756 }
1757
1758 return 0;
1759 }
1760
1761 static void xfs_fc_free(
1762 struct fs_context *fc)
1763 {
1764 struct xfs_mount *mp = fc->s_fs_info;
1765
1766 /*
1767 * mp is stored in the fs_context when it is initialized.
1768 * mp is transferred to the superblock on a successful mount,
1769 * but if an error occurs before the transfer we have to free
1770 * it here.
1771 */
1772 if (mp)
1773 xfs_mount_free(mp);
1774 }
1775
1776 static const struct fs_context_operations xfs_context_ops = {
1777 .parse_param = xfs_fc_parse_param,
1778 .get_tree = xfs_fc_get_tree,
1779 .reconfigure = xfs_fc_reconfigure,
1780 .free = xfs_fc_free,
1781 };
1782
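/*
 * Allocate the xfs_mount that backs this fs_context and set the defaults
 * that mount option parsing may override. Ownership of the xfs_mount
 * moves to the superblock in xfs_fc_fill_super(); until then,
 * xfs_fc_free() is responsible for releasing it.
 */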
1783 static int xfs_init_fs_context(
1784 struct fs_context *fc)
1785 {
1786 struct xfs_mount *mp;
1787
1788 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1789 if (!mp)
1790 return -ENOMEM;
1791
1792 spin_lock_init(&mp->m_sb_lock);
1793 spin_lock_init(&mp->m_agirotor_lock);
1794 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1795 spin_lock_init(&mp->m_perag_lock);
1796 mutex_init(&mp->m_growlock);
1797 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1798 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1799 INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1800 INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1801 mp->m_kobj.kobject.kset = xfs_kset;
1802 /*
1803 * We don't create the finobt per-ag space reservation until after log
1804 * recovery, so we must set this to true so that an ifree transaction
1805 * started during log recovery will not depend on space reservations
1806 * for finobt expansion.
1807 */
1808 mp->m_finobt_nores = true;
1809
1810 /*
1811 * These can be overridden by the mount option parsing.
1812 */
1813 mp->m_logbufs = -1;
1814 mp->m_logbsize = -1;
1815 mp->m_allocsize_log = 16; /* 64k */
1816
1817 /*
1818 * Copy binary VFS mount flags we are interested in.
1819 */
1820 if (fc->sb_flags & SB_RDONLY)
1821 mp->m_flags |= XFS_MOUNT_RDONLY;
1822 if (fc->sb_flags & SB_DIRSYNC)
1823 mp->m_flags |= XFS_MOUNT_DIRSYNC;
1824 if (fc->sb_flags & SB_SYNCHRONOUS)
1825 mp->m_flags |= XFS_MOUNT_WSYNC;
1826
1827 fc->s_fs_info = mp;
1828 fc->ops = &xfs_context_ops;
1829
1830 return 0;
1831 }
1832
1833 static struct file_system_type xfs_fs_type = {
1834 .owner = THIS_MODULE,
1835 .name = "xfs",
1836 .init_fs_context = xfs_init_fs_context,
1837 .parameters = xfs_fs_parameters,
1838 .kill_sb = kill_block_super,
1839 .fs_flags = FS_REQUIRES_DEV,
1840 };
1841 MODULE_ALIAS_FS("xfs");
1842
1843 STATIC int __init
1844 xfs_init_zones(void)
1845 {
1846 xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1847 sizeof(struct xlog_ticket),
1848 0, 0, NULL);
1849 if (!xfs_log_ticket_zone)
1850 goto out;
1851
1852 xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1853 sizeof(struct xfs_extent_free_item),
1854 0, 0, NULL);
1855 if (!xfs_bmap_free_item_zone)
1856 goto out_destroy_log_ticket_zone;
1857
1858 xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1859 sizeof(struct xfs_btree_cur),
1860 0, 0, NULL);
1861 if (!xfs_btree_cur_zone)
1862 goto out_destroy_bmap_free_item_zone;
1863
1864 xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1865 sizeof(struct xfs_da_state),
1866 0, 0, NULL);
1867 if (!xfs_da_state_zone)
1868 goto out_destroy_btree_cur_zone;
1869
1870 xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1871 sizeof(struct xfs_ifork),
1872 0, 0, NULL);
1873 if (!xfs_ifork_zone)
1874 goto out_destroy_da_state_zone;
1875
1876 xfs_trans_zone = kmem_cache_create("xf_trans",
1877 sizeof(struct xfs_trans),
1878 0, 0, NULL);
1879 if (!xfs_trans_zone)
1880 goto out_destroy_ifork_zone;
1881
1882
1883 /*
1884 * The size of the zone allocated buf log item is the maximum
1885 * size possible under XFS. This wastes a little bit of memory,
1886 * but it is much faster.
1887 */
1888 xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1889 sizeof(struct xfs_buf_log_item),
1890 0, 0, NULL);
1891 if (!xfs_buf_item_zone)
1892 goto out_destroy_trans_zone;
1893
1894 xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1895 (sizeof(struct xfs_efd_log_item) +
1896 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
1897 sizeof(struct xfs_extent)),
1898 0, 0, NULL);
1899 if (!xfs_efd_zone)
1900 goto out_destroy_buf_item_zone;
1901
1902 xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1903 (sizeof(struct xfs_efi_log_item) +
1904 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1905 sizeof(struct xfs_extent)),
1906 0, 0, NULL);
1907 if (!xfs_efi_zone)
1908 goto out_destroy_efd_zone;
1909
1910 xfs_inode_zone = kmem_cache_create("xfs_inode",
1911 sizeof(struct xfs_inode), 0,
1912 (SLAB_HWCACHE_ALIGN |
1913 SLAB_RECLAIM_ACCOUNT |
1914 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1915 xfs_fs_inode_init_once);
1916 if (!xfs_inode_zone)
1917 goto out_destroy_efi_zone;
1918
1919 xfs_ili_zone = kmem_cache_create("xfs_ili",
1920 sizeof(struct xfs_inode_log_item), 0,
1921 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1922 NULL);
1923 if (!xfs_ili_zone)
1924 goto out_destroy_inode_zone;
1925
1926 xfs_icreate_zone = kmem_cache_create("xfs_icr",
1927 sizeof(struct xfs_icreate_item),
1928 0, 0, NULL);
1929 if (!xfs_icreate_zone)
1930 goto out_destroy_ili_zone;
1931
1932 xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1933 sizeof(struct xfs_rud_log_item),
1934 0, 0, NULL);
1935 if (!xfs_rud_zone)
1936 goto out_destroy_icreate_zone;
1937
1938 xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1939 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1940 0, 0, NULL);
1941 if (!xfs_rui_zone)
1942 goto out_destroy_rud_zone;
1943
1944 xfs_cud_zone = kmem_cache_create("xfs_cud_item",
1945 sizeof(struct xfs_cud_log_item),
1946 0, 0, NULL);
1947 if (!xfs_cud_zone)
1948 goto out_destroy_rui_zone;
1949
1950 xfs_cui_zone = kmem_cache_create("xfs_cui_item",
1951 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1952 0, 0, NULL);
1953 if (!xfs_cui_zone)
1954 goto out_destroy_cud_zone;
1955
1956 xfs_bud_zone = kmem_cache_create("xfs_bud_item",
1957 sizeof(struct xfs_bud_log_item),
1958 0, 0, NULL);
1959 if (!xfs_bud_zone)
1960 goto out_destroy_cui_zone;
1961
1962 xfs_bui_zone = kmem_cache_create("xfs_bui_item",
1963 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1964 0, 0, NULL);
1965 if (!xfs_bui_zone)
1966 goto out_destroy_bud_zone;
1967
1968 return 0;
1969
1970 out_destroy_bud_zone:
1971 kmem_cache_destroy(xfs_bud_zone);
1972 out_destroy_cui_zone:
1973 kmem_cache_destroy(xfs_cui_zone);
1974 out_destroy_cud_zone:
1975 kmem_cache_destroy(xfs_cud_zone);
1976 out_destroy_rui_zone:
1977 kmem_cache_destroy(xfs_rui_zone);
1978 out_destroy_rud_zone:
1979 kmem_cache_destroy(xfs_rud_zone);
1980 out_destroy_icreate_zone:
1981 kmem_cache_destroy(xfs_icreate_zone);
1982 out_destroy_ili_zone:
1983 kmem_cache_destroy(xfs_ili_zone);
1984 out_destroy_inode_zone:
1985 kmem_cache_destroy(xfs_inode_zone);
1986 out_destroy_efi_zone:
1987 kmem_cache_destroy(xfs_efi_zone);
1988 out_destroy_efd_zone:
1989 kmem_cache_destroy(xfs_efd_zone);
1990 out_destroy_buf_item_zone:
1991 kmem_cache_destroy(xfs_buf_item_zone);
1992 out_destroy_trans_zone:
1993 kmem_cache_destroy(xfs_trans_zone);
1994 out_destroy_ifork_zone:
1995 kmem_cache_destroy(xfs_ifork_zone);
1996 out_destroy_da_state_zone:
1997 kmem_cache_destroy(xfs_da_state_zone);
1998 out_destroy_btree_cur_zone:
1999 kmem_cache_destroy(xfs_btree_cur_zone);
2000 out_destroy_bmap_free_item_zone:
2001 kmem_cache_destroy(xfs_bmap_free_item_zone);
2002 out_destroy_log_ticket_zone:
2003 kmem_cache_destroy(xfs_log_ticket_zone);
2004 out:
2005 return -ENOMEM;
2006 }
2007
2008 STATIC void
2009 xfs_destroy_zones(void)
2010 {
2011 /*
2012 * Make sure all delayed RCU free callbacks have run before we
2013 * destroy the caches.
2014 */
2015 rcu_barrier();
2016 kmem_cache_destroy(xfs_bui_zone);
2017 kmem_cache_destroy(xfs_bud_zone);
2018 kmem_cache_destroy(xfs_cui_zone);
2019 kmem_cache_destroy(xfs_cud_zone);
2020 kmem_cache_destroy(xfs_rui_zone);
2021 kmem_cache_destroy(xfs_rud_zone);
2022 kmem_cache_destroy(xfs_icreate_zone);
2023 kmem_cache_destroy(xfs_ili_zone);
2024 kmem_cache_destroy(xfs_inode_zone);
2025 kmem_cache_destroy(xfs_efi_zone);
2026 kmem_cache_destroy(xfs_efd_zone);
2027 kmem_cache_destroy(xfs_buf_item_zone);
2028 kmem_cache_destroy(xfs_trans_zone);
2029 kmem_cache_destroy(xfs_ifork_zone);
2030 kmem_cache_destroy(xfs_da_state_zone);
2031 kmem_cache_destroy(xfs_btree_cur_zone);
2032 kmem_cache_destroy(xfs_bmap_free_item_zone);
2033 kmem_cache_destroy(xfs_log_ticket_zone);
2034 }
2035
2036 STATIC int __init
2037 xfs_init_workqueues(void)
2038 {
2039 /*
2040 * The allocation workqueue can be used in memory reclaim situations
2041 * (writepage path), and parallelism is only limited by the number of
2042 * AGs in all the filesystems mounted. Hence use the default large
2043 * max_active value for this workqueue.
2044 */
2045 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2046 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2047 if (!xfs_alloc_wq)
2048 return -ENOMEM;
2049
2050 xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2051 if (!xfs_discard_wq)
2052 goto out_free_alloc_wq;
2053
2054 return 0;
2055 out_free_alloc_wq:
2056 destroy_workqueue(xfs_alloc_wq);
2057 return -ENOMEM;
2058 }
2059
2060 STATIC void
2061 xfs_destroy_workqueues(void)
2062 {
2063 destroy_workqueue(xfs_discard_wq);
2064 destroy_workqueue(xfs_alloc_wq);
2065 }
2066
2067 STATIC int __init
2068 init_xfs_fs(void)
2069 {
2070 int error;
2071
2072 xfs_check_ondisk_structs();
2073
2074 printk(KERN_INFO XFS_VERSION_STRING " with "
2075 XFS_BUILD_OPTIONS " enabled\n");
2076
2077 xfs_dir_startup();
2078
2079 error = xfs_init_zones();
2080 if (error)
2081 goto out;
2082
2083 error = xfs_init_workqueues();
2084 if (error)
2085 goto out_destroy_zones;
2086
2087 error = xfs_mru_cache_init();
2088 if (error)
2089 goto out_destroy_wq;
2090
2091 error = xfs_buf_init();
2092 if (error)
2093 goto out_mru_cache_uninit;
2094
2095 error = xfs_init_procfs();
2096 if (error)
2097 goto out_buf_terminate;
2098
2099 error = xfs_sysctl_register();
2100 if (error)
2101 goto out_cleanup_procfs;
2102
2103 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2104 if (!xfs_kset) {
2105 error = -ENOMEM;
2106 goto out_sysctl_unregister;
2107 }
2108
2109 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2110
2111 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2112 if (!xfsstats.xs_stats) {
2113 error = -ENOMEM;
2114 goto out_kset_unregister;
2115 }
2116
2117 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2118 "stats");
2119 if (error)
2120 goto out_free_stats;
2121
2122 #ifdef DEBUG
2123 xfs_dbg_kobj.kobject.kset = xfs_kset;
2124 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2125 if (error)
2126 goto out_remove_stats_kobj;
2127 #endif
2128
2129 error = xfs_qm_init();
2130 if (error)
2131 goto out_remove_dbg_kobj;
2132
2133 error = register_filesystem(&xfs_fs_type);
2134 if (error)
2135 goto out_qm_exit;
2136 return 0;
2137
2138 out_qm_exit:
2139 xfs_qm_exit();
2140 out_remove_dbg_kobj:
2141 #ifdef DEBUG
2142 xfs_sysfs_del(&xfs_dbg_kobj);
2143 out_remove_stats_kobj:
2144 #endif
2145 xfs_sysfs_del(&xfsstats.xs_kobj);
2146 out_free_stats:
2147 free_percpu(xfsstats.xs_stats);
2148 out_kset_unregister:
2149 kset_unregister(xfs_kset);
2150 out_sysctl_unregister:
2151 xfs_sysctl_unregister();
2152 out_cleanup_procfs:
2153 xfs_cleanup_procfs();
2154 out_buf_terminate:
2155 xfs_buf_terminate();
2156 out_mru_cache_uninit:
2157 xfs_mru_cache_uninit();
2158 out_destroy_wq:
2159 xfs_destroy_workqueues();
2160 out_destroy_zones:
2161 xfs_destroy_zones();
2162 out:
2163 return error;
2164 }
2165
2166 STATIC void __exit
2167 exit_xfs_fs(void)
2168 {
2169 xfs_qm_exit();
2170 unregister_filesystem(&xfs_fs_type);
2171 #ifdef DEBUG
2172 xfs_sysfs_del(&xfs_dbg_kobj);
2173 #endif
2174 xfs_sysfs_del(&xfsstats.xs_kobj);
2175 free_percpu(xfsstats.xs_stats);
2176 kset_unregister(xfs_kset);
2177 xfs_sysctl_unregister();
2178 xfs_cleanup_procfs();
2179 xfs_buf_terminate();
2180 xfs_mru_cache_uninit();
2181 xfs_destroy_workqueues();
2182 xfs_destroy_zones();
2183 xfs_uuid_table_free();
2184 }
2185
2186 module_init(init_xfs_fs);
2187 module_exit(exit_xfs_fs);
2188
2189 MODULE_AUTHOR("Silicon Graphics, Inc.");
2190 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2191 MODULE_LICENSE("GPL");