/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
STATIC struct quotactl_ops xfs_quotactl_operations;
STATIC struct super_operations xfs_super_operations;

STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb,
	int			silent)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;
	if (silent)
		args->flags |= XFSMNT_QUIET;
	args->flags |= XFSMNT_32BITINODES;

	return args;
}
STATIC __uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
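/*
 * Worked example for the limit computed above, assuming 4K pages
 * (PAGE_CACHE_SHIFT == 12); the numbers are illustrative, not from the
 * original source:
 *
 *   32-bit, 4K blocks:  pagefactor = 4096 >> (12 - 12) = 4096, bitshift = 31,
 *	so the limit is (4096 << 31) - 1 = 2^43 - 1  (~8 TiB).
 *   32-bit, 1K blocks:  pagefactor = 4096 >> (12 - 10) = 1024, bitshift = 31,
 *	so the limit is 2^41 - 1  (~2 TiB).
 *   64-bit:  pagefactor = 1, bitshift = 63, so the limit is 2^63 - 1.
 */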
STATIC __inline__ void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
STATIC __inline__ void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	bhv_vnode_t		*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_blksize = xfs_preferred_iosize(mp);
	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}
STATIC void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	bhv_vnode_t		*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = vn_to_inode(vp);

	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish our work.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		ip->i_flags &= ~XFS_INEW;

		unlock_new_inode(inode);
	}
}
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t		*mp)
{
	xfs_buf_t		*sbp = xfs_getsb(mp, 0);
	int			error;

	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int	error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
					QUEUE_ORDERED_NONE) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported by the underlying device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
	}
}
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	bhv_vnode_t		*vp;

	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
	return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}
STATIC void
xfs_fs_inode_init_once(
	void			*vnode,
	kmem_zone_t		*zonep,
	unsigned long		flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
		      SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
STATIC int
xfs_init_zones(void)
{
	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
					KM_ZONE_SPREAD,
					xfs_fs_inode_init_once);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;
	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_vnode_zone);
	kmem_zone_destroy(xfs_ioend_zone);
}
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		error = bhv_vop_iflush(vp, flags);
		if (error == EAGAIN)
			error = sync ? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
	}
	return -error;
}
STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_INC(vn_reclaim);
	XFS_STATS_DEC(vn_active);

	/*
	 * This can happen because xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	if (VNHEAD(vp))
		bhv_vop_inactive(vp, NULL);

	vp->v_flag &= ~VMODIFIED;

	if (VNHEAD(vp))
		if (bhv_vop_reclaim(vp))
			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

	ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
#endif
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct bhv_vfs	*vfs,
	void		*data,
	void		(*syncer)(bhv_vfs_t *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_vfs = vfs;
	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}
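/*
 * Typical call pattern (a minimal sketch; xfs_flush_inode() below is the
 * real caller): hand the object and a callback to xfssyncd, then back off
 * briefly so the worker thread gets a chance to run:
 *
 *	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
 *	delay(msecs_to_jiffies(500));
 */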
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
STATIC void
vfs_sync_worker(
	bhv_vfs_t	*vfsp,
	void		*unused)
{
	int		error;

	if (!(vfsp->vfs_flag & VFS_RDONLY))
		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
					SYNC_ATTR | SYNC_REFCACHE, NULL);
	vfsp->vfs_sync_seq++;
	wake_up(&vfsp->vfs_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	bhv_vfs_t		*vfsp = (bhv_vfs_t *) arg;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}
STATIC int
xfs_fs_start_syncd(
	bhv_vfs_t		*vfsp)
{
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
	if (IS_ERR(vfsp->vfs_sync_task))
		return -PTR_ERR(vfsp->vfs_sync_task);
	return 0;
}

STATIC void
xfs_fs_stop_syncd(
	bhv_vfs_t		*vfsp)
{
	kthread_stop(vfsp->vfs_sync_task);
}
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;

	xfs_fs_stop_syncd(vfsp);
	bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
	error = bhv_vfs_unmount(vfsp, 0, NULL);
	if (error) {
		printk("XFS: unmount got error=%d\n", error);
		printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
	} else {
		vfs_deallocate(vfsp);
	}
}
STATIC void
xfs_fs_write_super(
	struct super_block	*sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
}
STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;
	int			flags;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
		flags = SYNC_QUIESCE;
	else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	error = bhv_vfs_sync(vfsp, flags, NULL);

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp, NULL);
}
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = bhv_vfs_parseargs(vfsp, options, args, 1);
	if (!error)
		error = bhv_vfs_mntupdate(vfsp, flags, args);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC void
xfs_fs_lockfs(
	struct super_block	*sb)
{
	bhv_vfs_freeze(vfs_from_sb(sb));
}
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}
STATIC int
xfs_fs_quotasync(
	struct super_block	*sb,
	int			type)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}
STATIC int
xfs_fs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}
STATIC int
xfs_fs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}
STATIC int
xfs_fs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XGETQUOTA :
				  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
				   Q_XGETPQUOTA), id, (caddr_t)fdq);
}
STATIC int
xfs_fs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XSETQLIM :
				  ((type == GRPQUOTA) ? Q_XSETGQLIM :
				   Q_XSETPQLIM), id, (caddr_t)fdq);
}
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct bhv_vnode	*rootvp;
	struct bhv_vfs		*vfsp = vfs_allocate(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	bhv_insert_all_vfsops(vfsp);

	error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = bhv_vfs_mount(vfsp, args, NULL);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	set_posix_acl_flag(sb);

	error = bhv_vfs_root(vfsp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_unmount;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_unmount;
	}
	if ((error = xfs_fs_start_syncd(vfsp)))
		goto fail_unmount;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_unmount:
	bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}
STATIC struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
STATIC struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};
STATIC struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	si_meminfo(&si);
	xfs_physmem = si.totalram;

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	xfs_destroy_zones();

undo_zones:
	return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
	unregister_filesystem(&xfs_fs_type);
	xfs_buf_terminate();
	xfs_destroy_zones();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");