/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"

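/*
 * jfs_iget: look up (or read in) the inode with the given number and
 * wire up the inode, file, and address-space operations appropriate to
 * its type (regular file, directory, symlink, or special file).
 */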
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
	unlock_new_inode(inode);
	return inode;
}

/*
 * Workhorse of both fsync & write_inode
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}

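/*
 * jfs_write_inode: ->write_inode callback for the writeback code.
 * Commits the inode via jfs_commit_inode() when it is really dirty;
 * otherwise only flushes the journal so already-committed changes
 * reach the disk.
 */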
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_DIRTY is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	}
	return 0;
}

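/*
 * jfs_evict_inode: final teardown when an inode is dropped from the
 * inode cache.  For unlinked inodes this frees the on-disk inode and
 * its quota, then releases any active allocation-group reference.
 */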
void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			truncate_inode_pages_final(&inode->i_data);

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;

		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}

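/*
 * jfs_dirty_inode: record that the inode needs to be committed by
 * setting COMMIT_Dirty, unless the volume is mounted read-only.
 */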
void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}

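/*
 * jfs_get_block: get_block callback used by the generic buffer and
 * page I/O paths.  Maps logical block 'lblock' of the file to an
 * on-disk extent, allocating (or recording) a new extent when
 * 'create' is set, and fills in bh_result accordingly.
 */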
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}

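/*
 * Thin wrappers that feed jfs_get_block to the generic mpage/buffer
 * helpers; these provide most of the jfs_aops address-space methods.
 */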
static int jfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, jfs_get_block, wbc);
}

static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, jfs_get_block);
}

static int jfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
}

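/*
 * jfs_write_failed: clean up after a failed write that may have
 * extended the file - drop page cache beyond i_size and truncate any
 * blocks instantiated past it.
 */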
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

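/*
 * jfs_write_begin: ->write_begin callback built on nobh_write_begin();
 * on failure, trim anything instantiated beyond i_size.
 */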
static int jfs_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

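/*
 * jfs_direct_IO: ->direct_IO callback built on blockdev_direct_IO()
 * with jfs_get_block; a failed extending write is cleaned up via
 * jfs_write_failed().
 */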
static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

const struct address_space_operations jfs_aops = {
	.readpage	= jfs_readpage,
	.readpages	= jfs_readpages,
	.writepage	= jfs_writepage,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= nobh_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
};

/*
 * Guts of jfs_truncate.  Called with locks already held.  Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = ip->i_ctime = current_time(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

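/*
 * jfs_truncate: truncate the file to the current i_size, zeroing the
 * partial final page and taking the inode rdwrlock around
 * jfs_truncate_nolock().
 */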
void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}