/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};
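
/*
 * nilfs_iget_args is the lookup key passed to iget5_locked() and
 * ilookup5(): @ino and @root identify an ordinary inode, while inodes
 * used by the garbage collector (@for_gc != 0) are additionally
 * distinguished by the checkpoint number @cno.
 */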

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not an error (e.g. a hole); must return
		   without the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
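
/*
 * Sketch of the get_block contract implemented above (hypothetical
 * caller, for illustration only): a read helper such as
 * mpage_readpage() probes the mapping first and then issues the read
 * itself, roughly as in
 *
 *	struct buffer_head bh = { .b_size = PAGE_CACHE_SIZE };
 *
 *	if (!nilfs_get_block(inode, blkoff, &bh, 0) && buffer_mapped(&bh))
 *		...read bh.b_size bytes starting at disk block bh.b_blocknr...
 */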

/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 *	address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of the nilfs_aops
 *	address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
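
/*
 * Note: both writeback hooks above delegate the actual I/O to the NILFS
 * segment constructor rather than writing pages themselves.
 * ->writepages() triggers a data-sync segment for WB_SYNC_ALL, and
 * ->writepage() redirties the page and, depending on the mode, kicks
 * segment construction or flushing.
 */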

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
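
/*
 * Sketch of the transaction pairing above (assuming a plain buffered
 * write through the generic write path):
 *
 *	nilfs_write_begin()	-> nilfs_transaction_begin()
 *	  ...user data is copied into the page...
 *	nilfs_write_end()	-> generic_write_end()
 *				-> nilfs_transaction_commit()
 *
 * On a failed write_begin the transaction is aborted instead.
 */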

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}
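
/*
 * Note on the WRITE case above: returning 0 declines the direct write,
 * which the generic write path presumably treats as a short transfer
 * and retries through the buffered path; only reads are served by
 * blockdev_direct_IO() here.
 */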

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When supporting
				    nilfs_init_acl(), proper cancellation of
				    the above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
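
/*
 * nilfs_iget_test() and nilfs_iget_set() are the comparison and
 * initialization callbacks handed to iget5_locked(): the former decides
 * whether a cached inode matches a given nilfs_iget_args key, the
 * latter primes a freshly allocated inode with that key.
 */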

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
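
/*
 * Usage sketch (hypothetical caller, for illustration): once a
 * directory lookup has resolved an inode number, the in-memory inode
 * is obtained with
 *
 *	inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 */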

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending the on-disk inode, nilfs->ns_inode_size should be
	   checked for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
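
/*
 * The loop above releases blocks in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS (16384 blocks * 4 KB = 64 MB, matching the
 * comment on the macro), calling nilfs_relax_pressure_in_lock()
 * between chunks so that a very large truncate can yield and relieve
 * pressure instead of running in one long burst.
 */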

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct nilfs_root *root;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask, flags, NULL);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
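
/*
 * The second NULL check on ii->i_bh above is deliberate: the spinlock
 * is dropped while the inode block is read in, so another task may have
 * installed a buffer in the meantime.  If so, the newly read buffer is
 * released and the cached one is returned instead.
 */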

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}