/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_vfsops.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

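/*
 * Descriptive note (added for clarity): allocate a mount-args structure
 * and seed it from the mount(2) flags the VFS has already parsed
 * (MS_DIRSYNC, MS_SYNCHRONOUS) plus the silent flag; 32-bit inode
 * numbers are requested by default here.
 */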
STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block      *sb,
        int                     silent)
{
        struct xfs_mount_args   *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

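/*
 * Worked example of the limit computed below (a sketch, assuming 4k
 * pages): on a 32-bit kernel without CONFIG_LBD and 4k blocks,
 * pagefactor is 4096 and bitshift is 31, giving (4096 << 31) - 1,
 * i.e. 2^43 - 1 (just under 8TiB).  With CONFIG_LBD, bitshift becomes
 * 32 and the limit doubles to 2^44 - 1 (~16TiB).  On 64-bit kernels
 * pagefactor stays 1 and bitshift is 63, so the limit is 2^63 - 1.
 */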
__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}

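/*
 * Descriptive note (added for clarity): wire up the Linux inode, file and
 * address-space operation vectors according to the file type; special
 * files (devices, fifos, sockets) fall through to init_special_inode().
 */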
STATIC_INLINE void
xfs_set_inodeops(
        struct inode            *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}

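/*
 * Descriptive note (added for clarity): copy the current on-disk inode
 * state (mode, link count, ownership, size, timestamps and flags) from
 * the XFS incore inode into the Linux inode, then clear XFS_IMODIFIED
 * now that the two agree.
 */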
STATIC_INLINE void
xfs_revalidate_inode(
        xfs_mount_t             *mp,
        bhv_vnode_t             *vp,
        xfs_inode_t             *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
        inode->i_uid    = ip->i_d.di_uid;
        inode->i_gid    = ip->i_d.di_gid;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        xfs_iflags_clear(ip, XFS_IMODIFIED);
}

void
xfs_initialize_vnode(
        struct xfs_mount        *mp,
        bhv_vnode_t             *vp,
        struct xfs_inode        *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        if (!ip->i_vnode) {
                ip->i_vnode = vp;
                inode->i_private = ip;
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode.  We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(mp, vp, ip);
                xfs_set_inodeops(inode);

                xfs_iflags_clear(ip, XFS_INEW);
                barrier();

                unlock_new_inode(inode);
        }
}

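/*
 * Descriptive note (added for clarity): open the named block device
 * exclusively on behalf of this mount.  Returns 0 with *bdevp set on
 * success, or a positive errno (XFS's internal convention) if the
 * device cannot be opened.
 */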
int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t     *mp)
{
        xfs_buf_t       *sbp = xfs_getsb(mp, 0);
        int             error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer.  We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}

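/*
 * Descriptive note (added for clarity): decide whether write barriers can
 * stay enabled for this mount.  Barriers are turned off again if the log
 * lives on an external device, if the underlying queue does not advertise
 * ordered writes, if the data device is read-only, or if the trial
 * barrier write above fails.
 */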
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, underlying device is readonly");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

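/*
 * Descriptive note (added for clarity): inode allocation and freeing for
 * the VFS.  Linux inodes are carved out of (and returned to) the
 * xfs_vnode_zone slab set up in xfs_init_zones().
 */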
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        bhv_vnode_t             *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        kmem_zone_t             *zonep,
        void                    *vnode)
{
        inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

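/*
 * Descriptive note (added for clarity): set up the slab zones and the
 * ioend mempool used by the I/O path; the mempool keeps a minimum number
 * of ioends available so writeback completions can make progress under
 * memory pressure.
 */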
STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode            *inode,
        int                     sync)
{
        int                     error = 0, flags = FLUSH_INODE;

        xfs_itrace_entry(XFS_I(inode));
        if (sync) {
                filemap_fdatawait(inode->i_mapping);
                flags |= FLUSH_SYNC;
        }
        error = xfs_inode_flush(XFS_I(inode), flags);
        /*
         * if we failed to write out the inode then mark
         * it dirty again so we'll try again later.
         */
        if (error)
                mark_inode_dirty_sync(inode);

        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode            *inode)
{
        xfs_inode_t             *ip = XFS_I(inode);

        /*
         * ip can be null when xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (ip) {
                xfs_itrace_entry(ip);
                XFS_STATS_INC(vn_rele);
                XFS_STATS_INC(vn_remove);
                XFS_STATS_INC(vn_reclaim);
                XFS_STATS_DEC(vn_active);

                xfs_inactive(ip);
                xfs_iflags_clear(ip, XFS_IMODIFIED);
                if (xfs_reclaim(ip))
                        panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode);
        }

        ASSERT(XFS_I(inode) == NULL);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        filemap_flush(inode->i_mapping);
        iput(inode);
}

void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = ip->i_vnode;

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        sync_blockdev(mp->m_super->s_bdev);
        iput(inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY))
                error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR |
                                     SYNC_REFCACHE | SYNC_SUPER);
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

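/*
 * Descriptive note (added for clarity): main loop of the per-mount
 * xfssyncd thread.  It sleeps for the configured interval
 * (xfs_syncd_centisecs), then runs xfs_sync_worker plus any work items
 * queued via xfs_syncd_queue_work(), until kthread_stop() is called and
 * the work list drains.
 */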
STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);
        int                     error;

        kthread_stop(mp->m_sync_task);

        xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
        error = xfs_unmount(mp, 0, NULL);
        if (error)
                printk("XFS: unmount got error=%d\n", error);
}

STATIC void
xfs_fs_write_super(
        struct super_block      *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                xfs_sync(XFS_M(sb), SYNC_FSDATA);
        sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);
        int                     error;
        int                     flags;

        /*
         * Treat a sync operation like a freeze.  This is to work
         * around a race in sync_inodes() which works in two phases
         * - an asynchronous flush, which can write out an inode
         * without waiting for file size updates to complete, and a
         * synchronous flush, which won't do anything because the
         * async flush removed the inode's dirty flag.  Also
         * sync_inodes() will not see any files that just have
         * outstanding transactions to be flushed because we don't
         * dirty the Linux inode until after the transaction I/O
         * completes.
         */
        if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
                /*
                 * First stage of freeze - no more writers will make progress
                 * now we are here, so we flush delwri and delalloc buffers
                 * here, then wait for all I/O to complete.  Data is frozen at
                 * that point. Metadata is not frozen, transactions can still
                 * occur here so don't bother flushing the buftarg (i.e.
                 * SYNC_QUIESCE) because it'll just get dirty again.
                 */
                flags = SYNC_DATA_QUIESCE;
        } else
                flags = SYNC_FSDATA;

        error = xfs_sync(mp, flags);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int     prev_sync_seq = mp->m_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(mp->m_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(mp->m_wait_single_sync_task,
                                mp->m_sync_seq != prev_sync_seq);
        }

        return -error;
}

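/*
 * Descriptive note (added for clarity): fill in statfs(2) output from the
 * (lazily summed) superblock counters.  The free inode count is
 * synthesised: every free block could in principle hold another
 * 2^sb_inopblog inodes, capped by the maximum inode count if one is
 * configured.
 */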
STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        __uint64_t              fakeinos, id;
        xfs_extlen_t            lsize;

        statp->f_type = XFS_SB_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        statp->f_bfree = statp->f_bavail =
                                sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        fakeinos = statp->f_bfree << sbp->sb_inopblog;
#if XFS_BIG_INUMS
        fakeinos += mp->m_inoadd;
#endif
        statp->f_files =
            MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
        if (mp->m_maxicount)
#if XFS_BIG_INUMS
                if (!mp->m_inoadd)
#endif
                        statp->f_files = min_t(typeof(statp->f_files),
                                                statp->f_files,
                                                mp->m_maxicount);
        statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
        spin_unlock(&mp->m_sb_lock);

        XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp);
        return 0;
}

STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        struct xfs_mount        *mp = XFS_M(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
        int                     error;

        error = xfs_parseargs(mp, options, args, 1);
        if (!error)
                error = xfs_mntupdate(mp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC void
xfs_fs_lockfs(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_attr_quiesce(mp);
        xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct vfsmount         *mnt)
{
        return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
        struct super_block      *sb,
        int                     type)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block      *sb,
        struct fs_quota_stat    *fqs)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block      *sb,
        unsigned int            flags,
        int                     op)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -XFS_QM_QUOTACTL(XFS_M(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}

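/*
 * Descriptive note (added for clarity): superblock set-up at mount time.
 * Parse the mount options, call into the core xfs_mount() path, publish
 * the block size and maximum file size to the VFS, instantiate the root
 * dentry and finally start the per-mount xfssyncd thread.
 */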
STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        struct inode            *rootvp;
        struct xfs_mount        *mp = NULL;
        struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
        int                     error;

        mp = xfs_mount_init();

        INIT_LIST_HEAD(&mp->m_sync_list);
        spin_lock_init(&mp->m_sync_lock);
        init_waitqueue_head(&mp->m_wait_single_sync_task);

        mp->m_super = sb;
        sb->s_fs_info = mp;

        if (sb->s_flags & MS_RDONLY)
                mp->m_flags |= XFS_MOUNT_RDONLY;

        error = xfs_parseargs(mp, (char *)data, args, 0);
        if (error)
                goto fail_vfsop;

        sb_min_blocksize(sb, BBSIZE);
        sb->s_export_op = &xfs_export_operations;
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = xfs_mount(mp, args, NULL);
        if (error)
                goto fail_vfsop;

        sb->s_dirt = 1;
        sb->s_magic = XFS_SB_MAGIC;
        sb->s_blocksize = mp->m_sb.sb_blocksize;
        sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = xfs_root(mp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }

        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
        if (IS_ERR(mp->m_sync_task)) {
                error = -PTR_ERR(mp->m_sync_task);
                goto fail_vnrele;
        }

        xfs_itrace_exit(XFS_I(sb->s_root->d_inode));

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        xfs_unmount(mp, 0, NULL);

fail_vfsop:
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC int
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data,
        struct vfsmount         *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
                           mnt);
}

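/*
 * Descriptive note (added for clarity): operation tables handed to the
 * VFS and quota layers; they are wired up in xfs_fs_fill_super() via
 * sb->s_op and sb->s_qcop.
 */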
static struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

static struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

static struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};


STATIC int __init
init_xfs_fs( void )
{
        int                     error;
        static char             message[] __initdata = KERN_INFO \
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");