/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
21 #include <linux/module.h>
22 #include <linux/parser.h>
23 #include <linux/completion.h>
24 #include <linux/vfs.h>
25 #include <linux/quotaops.h>
26 #include <linux/mount.h>
27 #include <linux/moduleparam.h>
28 #include <linux/kthread.h>
29 #include <linux/posix_acl.h>
30 #include <linux/buffer_head.h>
31 #include <linux/exportfs.h>
32 #include <linux/crc32.h>
33 #include <linux/slab.h>
34 #include <asm/uaccess.h>
35 #include <linux/seq_file.h>
36 #include <linux/blkdev.h>
38 #include "jfs_incore.h"
39 #include "jfs_filsys.h"
40 #include "jfs_inode.h"
41 #include "jfs_metapage.h"
42 #include "jfs_superblock.h"
46 #include "jfs_debug.h"
47 #include "jfs_xattr.h"
49 MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
50 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
51 MODULE_LICENSE("GPL");
53 static struct kmem_cache
*jfs_inode_cachep
;
55 static const struct super_operations jfs_super_operations
;
56 static const struct export_operations jfs_export_operations
;
57 static struct file_system_type jfs_fs_type
;
59 #define MAX_COMMIT_THREADS 64
60 static int commit_threads
;
61 module_param(commit_threads
, int, 0);
62 MODULE_PARM_DESC(commit_threads
, "Number of commit threads");
64 static struct task_struct
*jfsCommitThread
[MAX_COMMIT_THREADS
];
65 struct task_struct
*jfsIOthread
;
66 struct task_struct
*jfsSyncThread
;
68 #ifdef CONFIG_JFS_DEBUG
69 int jfsloglevel
= JFS_LOGLEVEL_WARN
;
70 module_param(jfsloglevel
, int, 0644);
71 MODULE_PARM_DESC(jfsloglevel
, "Specify JFS loglevel (0, 1 or 2)");
74 static void jfs_handle_error(struct super_block
*sb
)
76 struct jfs_sb_info
*sbi
= JFS_SBI(sb
);
78 if (sb
->s_flags
& MS_RDONLY
)
81 updateSuper(sb
, FM_DIRTY
);
83 if (sbi
->flag
& JFS_ERR_PANIC
)
84 panic("JFS (device %s): panic forced after error\n",
86 else if (sbi
->flag
& JFS_ERR_REMOUNT_RO
) {
87 jfs_err("ERROR: (device %s): remounting filesystem as read-only\n",
89 sb
->s_flags
|= MS_RDONLY
;
92 /* nothing is done for continue beyond marking the superblock dirty */
95 void jfs_error(struct super_block
*sb
, const char *fmt
, ...)
105 pr_err("ERROR: (device %s): %pf: %pV\n",
106 sb
->s_id
, __builtin_return_address(0), &vaf
);
110 jfs_handle_error(sb
);
113 static struct inode
*jfs_alloc_inode(struct super_block
*sb
)
115 struct jfs_inode_info
*jfs_inode
;
117 jfs_inode
= kmem_cache_alloc(jfs_inode_cachep
, GFP_NOFS
);
120 return &jfs_inode
->vfs_inode
;
123 static void jfs_i_callback(struct rcu_head
*head
)
125 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
126 struct jfs_inode_info
*ji
= JFS_IP(inode
);
127 kmem_cache_free(jfs_inode_cachep
, ji
);
130 static void jfs_destroy_inode(struct inode
*inode
)
132 struct jfs_inode_info
*ji
= JFS_IP(inode
);
134 BUG_ON(!list_empty(&ji
->anon_inode_list
));
136 spin_lock_irq(&ji
->ag_lock
);
137 if (ji
->active_ag
!= -1) {
138 struct bmap
*bmap
= JFS_SBI(inode
->i_sb
)->bmap
;
139 atomic_dec(&bmap
->db_active
[ji
->active_ag
]);
142 spin_unlock_irq(&ji
->ag_lock
);
143 call_rcu(&inode
->i_rcu
, jfs_i_callback
);
146 static int jfs_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
148 struct jfs_sb_info
*sbi
= JFS_SBI(dentry
->d_sb
);
150 struct inomap
*imap
= JFS_IP(sbi
->ipimap
)->i_imap
;
152 jfs_info("In jfs_statfs");
153 buf
->f_type
= JFS_SUPER_MAGIC
;
154 buf
->f_bsize
= sbi
->bsize
;
155 buf
->f_blocks
= sbi
->bmap
->db_mapsize
;
156 buf
->f_bfree
= sbi
->bmap
->db_nfree
;
157 buf
->f_bavail
= sbi
->bmap
->db_nfree
;
159 * If we really return the number of allocated & free inodes, some
160 * applications will fail because they won't see enough free inodes.
161 * We'll try to calculate some guess as to how many inodes we can
164 * buf->f_files = atomic_read(&imap->im_numinos);
165 * buf->f_ffree = atomic_read(&imap->im_numfree);
167 maxinodes
= min((s64
) atomic_read(&imap
->im_numinos
) +
168 ((sbi
->bmap
->db_nfree
>> imap
->im_l2nbperiext
)
169 << L2INOSPEREXT
), (s64
) 0xffffffffLL
);
170 buf
->f_files
= maxinodes
;
171 buf
->f_ffree
= maxinodes
- (atomic_read(&imap
->im_numinos
) -
172 atomic_read(&imap
->im_numfree
));
173 buf
->f_fsid
.val
[0] = (u32
)crc32_le(0, sbi
->uuid
, sizeof(sbi
->uuid
)/2);
174 buf
->f_fsid
.val
[1] = (u32
)crc32_le(0, sbi
->uuid
+ sizeof(sbi
->uuid
)/2,
175 sizeof(sbi
->uuid
)/2);
177 buf
->f_namelen
= JFS_NAME_MAX
;
181 static void jfs_put_super(struct super_block
*sb
)
183 struct jfs_sb_info
*sbi
= JFS_SBI(sb
);
186 jfs_info("In jfs_put_super");
188 dquot_disable(sb
, -1, DQUOT_USAGE_ENABLED
| DQUOT_LIMITS_ENABLED
);
192 jfs_err("jfs_umount failed with return code %d", rc
);
194 unload_nls(sbi
->nls_tab
);
196 truncate_inode_pages(sbi
->direct_inode
->i_mapping
, 0);
197 iput(sbi
->direct_inode
);
203 Opt_integrity
, Opt_nointegrity
, Opt_iocharset
, Opt_resize
,
204 Opt_resize_nosize
, Opt_errors
, Opt_ignore
, Opt_err
, Opt_quota
,
205 Opt_usrquota
, Opt_grpquota
, Opt_uid
, Opt_gid
, Opt_umask
,
206 Opt_discard
, Opt_nodiscard
, Opt_discard_minblk
209 static const match_table_t tokens
= {
210 {Opt_integrity
, "integrity"},
211 {Opt_nointegrity
, "nointegrity"},
212 {Opt_iocharset
, "iocharset=%s"},
213 {Opt_resize
, "resize=%u"},
214 {Opt_resize_nosize
, "resize"},
215 {Opt_errors
, "errors=%s"},
216 {Opt_ignore
, "noquota"},
217 {Opt_ignore
, "quota"},
218 {Opt_usrquota
, "usrquota"},
219 {Opt_grpquota
, "grpquota"},
222 {Opt_umask
, "umask=%u"},
223 {Opt_discard
, "discard"},
224 {Opt_nodiscard
, "nodiscard"},
225 {Opt_discard_minblk
, "discard=%u"},
229 static int parse_options(char *options
, struct super_block
*sb
, s64
*newLVSize
,
232 void *nls_map
= (void *)-1; /* -1: no change; NULL: none */
234 struct jfs_sb_info
*sbi
= JFS_SBI(sb
);
241 while ((p
= strsep(&options
, ",")) != NULL
) {
242 substring_t args
[MAX_OPT_ARGS
];
247 token
= match_token(p
, tokens
, args
);
250 *flag
&= ~JFS_NOINTEGRITY
;
252 case Opt_nointegrity
:
253 *flag
|= JFS_NOINTEGRITY
;
256 /* Silently ignore the quota options */
257 /* Don't do anything ;-) */
260 if (nls_map
&& nls_map
!= (void *) -1)
262 if (!strcmp(args
[0].from
, "none"))
265 nls_map
= load_nls(args
[0].from
);
267 pr_err("JFS: charset not found\n");
274 char *resize
= args
[0].from
;
275 int rc
= kstrtoll(resize
, 0, newLVSize
);
281 case Opt_resize_nosize
:
283 *newLVSize
= sb
->s_bdev
->bd_inode
->i_size
>>
284 sb
->s_blocksize_bits
;
286 pr_err("JFS: Cannot determine volume size\n");
291 char *errors
= args
[0].from
;
292 if (!errors
|| !*errors
)
294 if (!strcmp(errors
, "continue")) {
295 *flag
&= ~JFS_ERR_REMOUNT_RO
;
296 *flag
&= ~JFS_ERR_PANIC
;
297 *flag
|= JFS_ERR_CONTINUE
;
298 } else if (!strcmp(errors
, "remount-ro")) {
299 *flag
&= ~JFS_ERR_CONTINUE
;
300 *flag
&= ~JFS_ERR_PANIC
;
301 *flag
|= JFS_ERR_REMOUNT_RO
;
302 } else if (!strcmp(errors
, "panic")) {
303 *flag
&= ~JFS_ERR_CONTINUE
;
304 *flag
&= ~JFS_ERR_REMOUNT_RO
;
305 *flag
|= JFS_ERR_PANIC
;
307 pr_err("JFS: %s is an invalid error handler\n",
317 *flag
|= JFS_USRQUOTA
;
320 *flag
|= JFS_GRPQUOTA
;
326 pr_err("JFS: quota operations not supported\n");
331 char *uid
= args
[0].from
;
333 int rc
= kstrtouint(uid
, 0, &val
);
337 sbi
->uid
= make_kuid(current_user_ns(), val
);
338 if (!uid_valid(sbi
->uid
))
345 char *gid
= args
[0].from
;
347 int rc
= kstrtouint(gid
, 0, &val
);
351 sbi
->gid
= make_kgid(current_user_ns(), val
);
352 if (!gid_valid(sbi
->gid
))
359 char *umask
= args
[0].from
;
360 int rc
= kstrtouint(umask
, 8, &sbi
->umask
);
364 if (sbi
->umask
& ~0777) {
365 pr_err("JFS: Invalid value of umask\n");
373 struct request_queue
*q
= bdev_get_queue(sb
->s_bdev
);
374 /* if set to 1, even copying files will cause
376 * -> user has more control over the online trimming
378 sbi
->minblks_trim
= 64;
379 if (blk_queue_discard(q
))
380 *flag
|= JFS_DISCARD
;
382 pr_err("JFS: discard option not supported on device\n");
387 *flag
&= ~JFS_DISCARD
;
390 case Opt_discard_minblk
:
392 struct request_queue
*q
= bdev_get_queue(sb
->s_bdev
);
393 char *minblks_trim
= args
[0].from
;
395 if (blk_queue_discard(q
)) {
396 *flag
|= JFS_DISCARD
;
397 rc
= kstrtouint(minblks_trim
, 0,
402 pr_err("JFS: discard option not supported on device\n");
407 printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
413 if (nls_map
!= (void *) -1) {
414 /* Discard old (if remount) */
415 unload_nls(sbi
->nls_tab
);
416 sbi
->nls_tab
= nls_map
;
421 if (nls_map
&& nls_map
!= (void *) -1)
426 static int jfs_remount(struct super_block
*sb
, int *flags
, char *data
)
430 int flag
= JFS_SBI(sb
)->flag
;
434 if (!parse_options(data
, sb
, &newLVSize
, &flag
))
438 if (sb
->s_flags
& MS_RDONLY
) {
439 pr_err("JFS: resize requires volume to be mounted read-write\n");
442 rc
= jfs_extendfs(sb
, newLVSize
, 0);
447 if ((sb
->s_flags
& MS_RDONLY
) && !(*flags
& MS_RDONLY
)) {
449 * Invalidate any previously read metadata. fsck may have
450 * changed the on-disk data since we mounted r/o
452 truncate_inode_pages(JFS_SBI(sb
)->direct_inode
->i_mapping
, 0);
454 JFS_SBI(sb
)->flag
= flag
;
455 ret
= jfs_mount_rw(sb
, 1);
457 /* mark the fs r/w for quota activity */
458 sb
->s_flags
&= ~MS_RDONLY
;
460 dquot_resume(sb
, -1);
463 if ((!(sb
->s_flags
& MS_RDONLY
)) && (*flags
& MS_RDONLY
)) {
464 rc
= dquot_suspend(sb
, -1);
467 rc
= jfs_umount_rw(sb
);
468 JFS_SBI(sb
)->flag
= flag
;
471 if ((JFS_SBI(sb
)->flag
& JFS_NOINTEGRITY
) != (flag
& JFS_NOINTEGRITY
))
472 if (!(sb
->s_flags
& MS_RDONLY
)) {
473 rc
= jfs_umount_rw(sb
);
477 JFS_SBI(sb
)->flag
= flag
;
478 ret
= jfs_mount_rw(sb
, 1);
481 JFS_SBI(sb
)->flag
= flag
;
486 static int jfs_fill_super(struct super_block
*sb
, void *data
, int silent
)
488 struct jfs_sb_info
*sbi
;
492 int flag
, ret
= -EINVAL
;
494 jfs_info("In jfs_read_super: s_flags=0x%lx", sb
->s_flags
);
496 if (!new_valid_dev(sb
->s_bdev
->bd_dev
))
499 sbi
= kzalloc(sizeof(struct jfs_sb_info
), GFP_KERNEL
);
504 sb
->s_max_links
= JFS_LINK_MAX
;
506 sbi
->uid
= INVALID_UID
;
507 sbi
->gid
= INVALID_GID
;
510 /* initialize the mount flag and determine the default error handler */
511 flag
= JFS_ERR_REMOUNT_RO
;
513 if (!parse_options((char *) data
, sb
, &newLVSize
, &flag
))
517 #ifdef CONFIG_JFS_POSIX_ACL
518 sb
->s_flags
|= MS_POSIXACL
;
522 pr_err("resize option for remount only\n");
527 * Initialize blocksize to 4K.
529 sb_set_blocksize(sb
, PSIZE
);
532 * Set method vectors.
534 sb
->s_op
= &jfs_super_operations
;
535 sb
->s_export_op
= &jfs_export_operations
;
536 sb
->s_xattr
= jfs_xattr_handlers
;
538 sb
->dq_op
= &dquot_operations
;
539 sb
->s_qcop
= &dquot_quotactl_ops
;
543 * Initialize direct-mapping inode/address-space
545 inode
= new_inode(sb
);
551 inode
->i_size
= sb
->s_bdev
->bd_inode
->i_size
;
552 inode
->i_mapping
->a_ops
= &jfs_metapage_aops
;
553 hlist_add_fake(&inode
->i_hash
);
554 mapping_set_gfp_mask(inode
->i_mapping
, GFP_NOFS
);
556 sbi
->direct_inode
= inode
;
561 jfs_err("jfs_mount failed w/return code = %d", rc
);
562 goto out_mount_failed
;
564 if (sb
->s_flags
& MS_RDONLY
)
567 rc
= jfs_mount_rw(sb
, 0);
570 jfs_err("jfs_mount_rw failed, return code = %d",
577 sb
->s_magic
= JFS_SUPER_MAGIC
;
579 if (sbi
->mntflag
& JFS_OS2
)
580 sb
->s_d_op
= &jfs_ci_dentry_operations
;
582 inode
= jfs_iget(sb
, ROOT_I
);
584 ret
= PTR_ERR(inode
);
587 sb
->s_root
= d_make_root(inode
);
591 /* logical blocks are represented by 40 bits in pxd_t, etc. */
592 sb
->s_maxbytes
= ((u64
) sb
->s_blocksize
) << 40;
593 #if BITS_PER_LONG == 32
595 * Page cache is indexed by long.
596 * I would use MAX_LFS_FILESIZE, but it's only half as big
598 sb
->s_maxbytes
= min(((u64
) PAGE_CACHE_SIZE
<< 32) - 1,
599 (u64
)sb
->s_maxbytes
);
605 jfs_err("jfs_read_super: get root dentry failed");
610 jfs_err("jfs_umount failed with return code %d", rc
);
612 filemap_write_and_wait(sbi
->direct_inode
->i_mapping
);
613 truncate_inode_pages(sbi
->direct_inode
->i_mapping
, 0);
614 make_bad_inode(sbi
->direct_inode
);
615 iput(sbi
->direct_inode
);
616 sbi
->direct_inode
= NULL
;
619 unload_nls(sbi
->nls_tab
);
625 static int jfs_freeze(struct super_block
*sb
)
627 struct jfs_sb_info
*sbi
= JFS_SBI(sb
);
628 struct jfs_log
*log
= sbi
->log
;
631 if (!(sb
->s_flags
& MS_RDONLY
)) {
633 rc
= lmLogShutdown(log
);
635 jfs_error(sb
, "lmLogShutdown failed\n");
637 /* let operations fail rather than hang */
642 rc
= updateSuper(sb
, FM_CLEAN
);
644 jfs_err("jfs_freeze: updateSuper failed\n");
646 * Don't fail here. Everything succeeded except
647 * marking the superblock clean, so there's really
648 * no harm in leaving it frozen for now.
655 static int jfs_unfreeze(struct super_block
*sb
)
657 struct jfs_sb_info
*sbi
= JFS_SBI(sb
);
658 struct jfs_log
*log
= sbi
->log
;
661 if (!(sb
->s_flags
& MS_RDONLY
)) {
662 rc
= updateSuper(sb
, FM_MOUNT
);
664 jfs_error(sb
, "updateSuper failed\n");
669 jfs_error(sb
, "lmLogInit failed\n");
676 static struct dentry
*jfs_do_mount(struct file_system_type
*fs_type
,
677 int flags
, const char *dev_name
, void *data
)
679 return mount_bdev(fs_type
, flags
, dev_name
, data
, jfs_fill_super
);
682 static int jfs_sync_fs(struct super_block
*sb
, int wait
)
684 struct jfs_log
*log
= JFS_SBI(sb
)->log
;
686 /* log == NULL indicates read-only mount */
689 * Write quota structures to quota file, sync_blockdev() will
690 * write them to disk later
692 dquot_writeback_dquots(sb
, -1);
693 jfs_flush_journal(log
, wait
);
700 static int jfs_show_options(struct seq_file
*seq
, struct dentry
*root
)
702 struct jfs_sb_info
*sbi
= JFS_SBI(root
->d_sb
);
704 if (uid_valid(sbi
->uid
))
705 seq_printf(seq
, ",uid=%d", from_kuid(&init_user_ns
, sbi
->uid
));
706 if (gid_valid(sbi
->gid
))
707 seq_printf(seq
, ",gid=%d", from_kgid(&init_user_ns
, sbi
->gid
));
708 if (sbi
->umask
!= -1)
709 seq_printf(seq
, ",umask=%03o", sbi
->umask
);
710 if (sbi
->flag
& JFS_NOINTEGRITY
)
711 seq_puts(seq
, ",nointegrity");
712 if (sbi
->flag
& JFS_DISCARD
)
713 seq_printf(seq
, ",discard=%u", sbi
->minblks_trim
);
715 seq_printf(seq
, ",iocharset=%s", sbi
->nls_tab
->charset
);
716 if (sbi
->flag
& JFS_ERR_CONTINUE
)
717 seq_printf(seq
, ",errors=continue");
718 if (sbi
->flag
& JFS_ERR_PANIC
)
719 seq_printf(seq
, ",errors=panic");
722 if (sbi
->flag
& JFS_USRQUOTA
)
723 seq_puts(seq
, ",usrquota");
725 if (sbi
->flag
& JFS_GRPQUOTA
)
726 seq_puts(seq
, ",grpquota");
734 /* Read data from quotafile - avoid pagecache and such because we cannot afford
735 * acquiring the locks... As quota files are never truncated and quota code
736 * itself serializes the operations (and no one else should touch the files)
737 * we don't have to be afraid of races */
738 static ssize_t
jfs_quota_read(struct super_block
*sb
, int type
, char *data
,
739 size_t len
, loff_t off
)
741 struct inode
*inode
= sb_dqopt(sb
)->files
[type
];
742 sector_t blk
= off
>> sb
->s_blocksize_bits
;
744 int offset
= off
& (sb
->s_blocksize
- 1);
747 struct buffer_head tmp_bh
;
748 struct buffer_head
*bh
;
749 loff_t i_size
= i_size_read(inode
);
753 if (off
+len
> i_size
)
757 tocopy
= sb
->s_blocksize
- offset
< toread
?
758 sb
->s_blocksize
- offset
: toread
;
761 tmp_bh
.b_size
= 1 << inode
->i_blkbits
;
762 err
= jfs_get_block(inode
, blk
, &tmp_bh
, 0);
765 if (!buffer_mapped(&tmp_bh
)) /* A hole? */
766 memset(data
, 0, tocopy
);
768 bh
= sb_bread(sb
, tmp_bh
.b_blocknr
);
771 memcpy(data
, bh
->b_data
+offset
, tocopy
);
782 /* Write to quotafile */
783 static ssize_t
jfs_quota_write(struct super_block
*sb
, int type
,
784 const char *data
, size_t len
, loff_t off
)
786 struct inode
*inode
= sb_dqopt(sb
)->files
[type
];
787 sector_t blk
= off
>> sb
->s_blocksize_bits
;
789 int offset
= off
& (sb
->s_blocksize
- 1);
791 size_t towrite
= len
;
792 struct buffer_head tmp_bh
;
793 struct buffer_head
*bh
;
795 mutex_lock(&inode
->i_mutex
);
796 while (towrite
> 0) {
797 tocopy
= sb
->s_blocksize
- offset
< towrite
?
798 sb
->s_blocksize
- offset
: towrite
;
801 tmp_bh
.b_size
= 1 << inode
->i_blkbits
;
802 err
= jfs_get_block(inode
, blk
, &tmp_bh
, 1);
805 if (offset
|| tocopy
!= sb
->s_blocksize
)
806 bh
= sb_bread(sb
, tmp_bh
.b_blocknr
);
808 bh
= sb_getblk(sb
, tmp_bh
.b_blocknr
);
814 memcpy(bh
->b_data
+offset
, data
, tocopy
);
815 flush_dcache_page(bh
->b_page
);
816 set_buffer_uptodate(bh
);
817 mark_buffer_dirty(bh
);
826 if (len
== towrite
) {
827 mutex_unlock(&inode
->i_mutex
);
830 if (inode
->i_size
< off
+len
-towrite
)
831 i_size_write(inode
, off
+len
-towrite
);
833 inode
->i_mtime
= inode
->i_ctime
= CURRENT_TIME
;
834 mark_inode_dirty(inode
);
835 mutex_unlock(&inode
->i_mutex
);
836 return len
- towrite
;
841 static const struct super_operations jfs_super_operations
= {
842 .alloc_inode
= jfs_alloc_inode
,
843 .destroy_inode
= jfs_destroy_inode
,
844 .dirty_inode
= jfs_dirty_inode
,
845 .write_inode
= jfs_write_inode
,
846 .evict_inode
= jfs_evict_inode
,
847 .put_super
= jfs_put_super
,
848 .sync_fs
= jfs_sync_fs
,
849 .freeze_fs
= jfs_freeze
,
850 .unfreeze_fs
= jfs_unfreeze
,
851 .statfs
= jfs_statfs
,
852 .remount_fs
= jfs_remount
,
853 .show_options
= jfs_show_options
,
855 .quota_read
= jfs_quota_read
,
856 .quota_write
= jfs_quota_write
,
860 static const struct export_operations jfs_export_operations
= {
861 .fh_to_dentry
= jfs_fh_to_dentry
,
862 .fh_to_parent
= jfs_fh_to_parent
,
863 .get_parent
= jfs_get_parent
,
866 static struct file_system_type jfs_fs_type
= {
867 .owner
= THIS_MODULE
,
869 .mount
= jfs_do_mount
,
870 .kill_sb
= kill_block_super
,
871 .fs_flags
= FS_REQUIRES_DEV
,
873 MODULE_ALIAS_FS("jfs");
875 static void init_once(void *foo
)
877 struct jfs_inode_info
*jfs_ip
= (struct jfs_inode_info
*) foo
;
879 memset(jfs_ip
, 0, sizeof(struct jfs_inode_info
));
880 INIT_LIST_HEAD(&jfs_ip
->anon_inode_list
);
881 init_rwsem(&jfs_ip
->rdwrlock
);
882 mutex_init(&jfs_ip
->commit_mutex
);
883 init_rwsem(&jfs_ip
->xattr_sem
);
884 spin_lock_init(&jfs_ip
->ag_lock
);
885 jfs_ip
->active_ag
= -1;
886 inode_init_once(&jfs_ip
->vfs_inode
);
889 static int __init
init_jfs_fs(void)
895 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info
), 0,
896 SLAB_RECLAIM_ACCOUNT
|SLAB_MEM_SPREAD
,
898 if (jfs_inode_cachep
== NULL
)
902 * Metapage initialization
904 rc
= metapage_init();
906 jfs_err("metapage_init failed w/rc = %d", rc
);
911 * Transaction Manager initialization
915 jfs_err("txInit failed w/rc = %d", rc
);
920 * I/O completion thread (endio)
922 jfsIOthread
= kthread_run(jfsIOWait
, NULL
, "jfsIO");
923 if (IS_ERR(jfsIOthread
)) {
924 rc
= PTR_ERR(jfsIOthread
);
925 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc
);
929 if (commit_threads
< 1)
930 commit_threads
= num_online_cpus();
931 if (commit_threads
> MAX_COMMIT_THREADS
)
932 commit_threads
= MAX_COMMIT_THREADS
;
934 for (i
= 0; i
< commit_threads
; i
++) {
935 jfsCommitThread
[i
] = kthread_run(jfs_lazycommit
, NULL
,
937 if (IS_ERR(jfsCommitThread
[i
])) {
938 rc
= PTR_ERR(jfsCommitThread
[i
]);
939 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc
);
941 goto kill_committask
;
945 jfsSyncThread
= kthread_run(jfs_sync
, NULL
, "jfsSync");
946 if (IS_ERR(jfsSyncThread
)) {
947 rc
= PTR_ERR(jfsSyncThread
);
948 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc
);
949 goto kill_committask
;
956 rc
= register_filesystem(&jfs_fs_type
);
963 kthread_stop(jfsSyncThread
);
965 for (i
= 0; i
< commit_threads
; i
++)
966 kthread_stop(jfsCommitThread
[i
]);
967 kthread_stop(jfsIOthread
);
973 kmem_cache_destroy(jfs_inode_cachep
);
977 static void __exit
exit_jfs_fs(void)
981 jfs_info("exit_jfs_fs called");
986 kthread_stop(jfsIOthread
);
987 for (i
= 0; i
< commit_threads
; i
++)
988 kthread_stop(jfsCommitThread
[i
]);
989 kthread_stop(jfsSyncThread
);
993 unregister_filesystem(&jfs_fs_type
);
996 * Make sure all delayed rcu free inodes are flushed before we
1000 kmem_cache_destroy(jfs_inode_cachep
);
1003 module_init(init_jfs_fs
)
1004 module_exit(exit_jfs_fs
)