git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git / blame: fs/xfs/linux-2.6/xfs_super.c
Commit: [XFS] This fix prevents bulkstat from spinning in an infinite loop.
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_vfsops.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block *sb,
        int silent)
{
        struct xfs_mount_args *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

__uint64_t
xfs_max_file_offset(
        unsigned int blockshift)
{
        unsigned int pagefactor = 1;
        unsigned int bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}

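/*
 * Worked example for xfs_max_file_offset() (illustrative, not from the
 * original source): with 32-bit longs, CONFIG_LBD unset, 4k pages and a
 * 4k block size (blockshift = 12), pagefactor is PAGE_CACHE_SIZE >> 0 =
 * 4096 and bitshift is 31, so the result is (4096 << 31) - 1 = 2^43 - 1,
 * i.e. just under 8Tb, matching the "wraps at around 8Tb" note above.
 */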
STATIC_INLINE void
xfs_set_inodeops(
        struct inode *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}

STATIC_INLINE void
xfs_revalidate_inode(
        xfs_mount_t *mp,
        bhv_vnode_t *vp,
        xfs_inode_t *ip)
{
        struct inode *inode = vn_to_inode(vp);

        inode->i_mode = ip->i_d.di_mode;
        inode->i_nlink = ip->i_d.di_nlink;
        inode->i_uid = ip->i_d.di_uid;
        inode->i_gid = ip->i_d.di_gid;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        xfs_iflags_clear(ip, XFS_IMODIFIED);
}

void
xfs_initialize_vnode(
        struct xfs_mount *mp,
        bhv_vnode_t *vp,
        struct xfs_inode *ip)
{
        struct inode *inode = vn_to_inode(vp);

        if (!ip->i_vnode) {
                ip->i_vnode = vp;
                inode->i_private = ip;
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode. We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(mp, vp, ip);
                xfs_set_inodeops(inode);

                xfs_iflags_clear(ip, XFS_INEW);
                barrier();

                unlock_new_inode(inode);
        }
}

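/*
 * Note on error conventions: the XFS core returns positive errno values,
 * while the Linux VFS expects zero or a negative errno, so the glue
 * routines in this file negate the error on return ("return -error").
 */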
int
xfs_blkdev_get(
        xfs_mount_t *mp,
        const char *name,
        struct block_device **bdevp)
{
        int error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t *mp)
{
        xfs_buf_t *sbp = xfs_getsb(mp, 0);
        int error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer. We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}

void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, underlying device is readonly");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block *sb)
{
        bhv_vnode_t *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        void *vnode,
        kmem_zone_t *zonep,
        unsigned long flags)
{
        inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

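/*
 * Zone and mempool setup. xfs_init_zones() backs the ioend zone with a
 * mempool: a mempool keeps a minimum reserve of elements and makes callers
 * wait for one to be freed instead of failing the allocation, so ioend
 * structures can still be obtained while the system is low on memory.
 */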
STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode *inode,
        int sync)
{
        int error = 0, flags = FLUSH_INODE;

        vn_trace_entry(XFS_I(inode), __FUNCTION__,
                        (inst_t *)__return_address);
        if (sync) {
                filemap_fdatawait(inode->i_mapping);
                flags |= FLUSH_SYNC;
        }
        error = xfs_inode_flush(XFS_I(inode), flags);
        if (error == EAGAIN) {
                if (sync)
                        error = xfs_inode_flush(XFS_I(inode),
                                                flags | FLUSH_LOG);
                else
                        error = 0;
        }

        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode *inode)
{
        xfs_inode_t *ip = XFS_I(inode);

        /*
         * ip can be null when xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (ip) {
                vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address);

                XFS_STATS_INC(vn_rele);
                XFS_STATS_INC(vn_remove);
                XFS_STATS_INC(vn_reclaim);
                XFS_STATS_DEC(vn_active);

                xfs_inactive(ip);
                xfs_iflags_clear(ip, XFS_IMODIFIED);
                if (xfs_reclaim(ip))
                        panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode);
        }

        ASSERT(XFS_I(inode) == NULL);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void *data,
        void (*syncer)(struct xfs_mount *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        struct xfs_mount *mp,
        void *arg)
{
        struct inode *inode = arg;
        filemap_flush(inode->i_mapping);
        iput(inode);
}

void
xfs_flush_inode(
        xfs_inode_t *ip)
{
        struct inode *inode = ip->i_vnode;

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        struct xfs_mount *mp,
        void *arg)
{
        struct inode *inode = arg;
        sync_blockdev(mp->m_super->s_bdev);
        iput(inode);
}

void
xfs_flush_device(
        xfs_inode_t *ip)
{
        struct inode *inode = vn_to_inode(XFS_ITOV(ip));

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void *unused)
{
        int error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY))
                error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR |
                                     SYNC_REFCACHE | SYNC_SUPER);
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

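/*
 * xfssyncd is the per-mount sync daemon. It sleeps for xfs_syncd_centisecs,
 * then drains mp->m_sync_list and runs each queued work item; whenever the
 * timeout has expired or the list is empty it also queues the mount's own
 * m_sync_work, so xfs_sync_worker() runs periodically.
 */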
STATIC int
xfssyncd(
        void *arg)
{
        struct xfs_mount *mp = arg;
        long timeleft;
        bhv_vfs_sync_work_t *work, *n;
        LIST_HEAD(tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC void
xfs_fs_put_super(
        struct super_block *sb)
{
        struct xfs_mount *mp = XFS_M(sb);
        int error;

        kthread_stop(mp->m_sync_task);

        xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
        error = xfs_unmount(mp, 0, NULL);
        if (error)
                printk("XFS: unmount got error=%d\n", error);
}

STATIC void
xfs_fs_write_super(
        struct super_block *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                xfs_sync(XFS_M(sb), SYNC_FSDATA);
        sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
        struct super_block *sb,
        int wait)
{
        struct xfs_mount *mp = XFS_M(sb);
        int error;
        int flags;

        if (unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
                /*
                 * First stage of freeze - no more writers will make progress
                 * now we are here, so we flush delwri and delalloc buffers
                 * here, then wait for all I/O to complete. Data is frozen at
                 * that point. Metadata is not frozen, transactions can still
                 * occur here so don't bother flushing the buftarg (i.e
                 * SYNC_QUIESCE) because it'll just get dirty again.
                 */
                flags = SYNC_DATA_QUIESCE;
        } else
                flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

        error = xfs_sync(mp, flags);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int prev_sync_seq = mp->m_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(mp->m_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(mp->m_wait_single_sync_task,
                                mp->m_sync_seq != prev_sync_seq);
        }

        return -error;
}

STATIC int
xfs_fs_statfs(
        struct dentry *dentry,
        struct kstatfs *statp)
{
        return -xfs_statvfs(XFS_M(dentry->d_sb), statp,
                                vn_from_inode(dentry->d_inode));
}

STATIC int
xfs_fs_remount(
        struct super_block *sb,
        int *flags,
        char *options)
{
        struct xfs_mount *mp = XFS_M(sb);
        struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
        int error;

        error = xfs_parseargs(mp, options, args, 1);
        if (!error)
                error = xfs_mntupdate(mp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC void
xfs_fs_lockfs(
        struct super_block *sb)
{
        xfs_freeze(XFS_M(sb));
}

STATIC int
xfs_fs_show_options(
        struct seq_file *m,
        struct vfsmount *mnt)
{
        return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
        struct super_block *sb,
        int type)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block *sb,
        struct fs_quota_stat *fqs)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block *sb,
        unsigned int flags,
        int op)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block *sb,
        int type,
        qid_t id,
        struct fs_disk_quota *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block *sb,
        int type,
        qid_t id,
        struct fs_disk_quota *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_fill_super(
        struct super_block *sb,
        void *data,
        int silent)
{
        struct inode *rootvp;
        struct xfs_mount *mp = NULL;
        struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
        struct kstatfs statvfs;
        int error;

        mp = xfs_mount_init();

        INIT_LIST_HEAD(&mp->m_sync_list);
        spin_lock_init(&mp->m_sync_lock);
        init_waitqueue_head(&mp->m_wait_single_sync_task);

        mp->m_super = sb;
        sb->s_fs_info = mp;

        if (sb->s_flags & MS_RDONLY)
                mp->m_flags |= XFS_MOUNT_RDONLY;

        error = xfs_parseargs(mp, (char *)data, args, 0);
        if (error)
                goto fail_vfsop;

        sb_min_blocksize(sb, BBSIZE);
        sb->s_export_op = &xfs_export_operations;
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = xfs_mount(mp, args, NULL);
        if (error)
                goto fail_vfsop;

        error = xfs_statvfs(mp, &statvfs, NULL);
        if (error)
                goto fail_unmount;

        sb->s_dirt = 1;
        sb->s_magic = statvfs.f_type;
        sb->s_blocksize = statvfs.f_bsize;
        sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = xfs_root(mp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }

        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
        if (IS_ERR(mp->m_sync_task)) {
                error = -PTR_ERR(mp->m_sync_task);
                goto fail_vnrele;
        }

        vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__,
                        (inst_t *)__return_address);

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        xfs_unmount(mp, 0, NULL);

fail_vfsop:
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC int
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int flags,
        const char *dev_name,
        void *data,
        struct vfsmount *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
                           mnt);
}

static struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

static struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

static struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};


STATIC int __init
init_xfs_fs( void )
{
        int error;
        static char message[] __initdata = KERN_INFO \
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");