 *  Daniel Pirkl <daniel.pirkl@email.cz>
 *  Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
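
/*
 * Note on grow_chain32()/grow_chain64(): the pointer just read is only
 * accepted once every key cached so far in chain[] still matches what is on
 * disk under ufsi->meta_lock (a seqlock).  If a concurrent truncate changed
 * any of them, the read_seqretry() loop re-samples and the helpers return
 * false so the caller can restart the whole lookup.
 */
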
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */
static u64 ufs_frag_map(struct inode *inode, sector_t frag)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	unsigned offsets[4], *p;
	Indirect chain[4], *q = chain;
	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase + (frag & uspi->s_fpbmask);
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}
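
/*
 * Example (illustrative, assuming uspi->s_fpb == 8 fragments per block):
 * for frag == 21 the lookup walks the block path for logical block
 * 21 >> s_fpbshift == 2 and then adds back the offset inside the block,
 * frag & s_fpbmask == 5, plus uspi->s_sbbase, to get the on-disk fragment.
 */
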
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of the `fragment' which holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment: number of newly allocated fragment(s)
 * @required: how many fragments we require
 * @err: set if something goes wrong
 * @phys: pointer to where we save the physical number of the newly allocated
 *   fragments, NULL if we are not allocating data (e.g. indirect blocks)
 * @new: set if we allocated a new block
 * @locked_page: for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

repeat:
	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (!phys) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)tmp + blockoff);
				return result;
			}
			brelse (result);
			goto repeat;
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp) {
				if (lastfrag != ufsi->i_lastfrag)
					goto repeat;
				else
					return NULL;
			}
			lastfrag = ufsi->i_lastfrag;
		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
	/*
	 * We will extend last allocated block
	 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
	/*
	 * We will allocate new block before last allocated block
	 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
		    (blockoff && lastfrag != ufsi->i_lastfrag))
			goto repeat;
		*err = -ENOSPC;
		return NULL;
	}

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
	return result;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}
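
/*
 * Calling convention reminder (see the kernel-doc above): when @phys is NULL
 * the caller wants metadata and gets back a buffer_head for the new fragment;
 * when @phys is non-NULL the caller only needs the physical fragment number,
 * *new is set if something was actually allocated, and the returned
 * buffer_head is NULL.
 */
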
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to the block which holds the "pointer" to the newly allocated block
 * @fragment: number of the `fragment' which holds the pointer
 *   to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *   (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		   u64 fragment, sector_t new_fragment, int *err,
		   long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff;
	u64 tmp, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;
repeat:
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp) {
		if (!phys) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == ufs_data_ptr_to_cpu(sb, p))
				goto out;
			brelse (result);
			goto repeat;
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			goto out;
		}
	}

	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp) {
		if (ufs_data_ptr_to_cpu(sb, p))
			goto repeat;
		goto out;
	}

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
out:
	brelse (bh);
	UFSD("EXIT\n");
	return result;
}

/*
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi = sbi->s_uspi;
	struct buffer_head * bh;
	int err, new;
	unsigned long ptr,phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, fragment);
		UFSD("phys64 = %llu\n", (unsigned long long)phys64);
		if (phys64)
			map_bh(bh_result, sb, phys64);
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	bh = NULL;

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (fragment >
	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
	     << uspi->s_fpbshift))
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
			  bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
			  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, NULL, NULL, NULL)

	if (ptr < UFS_NDIR_FRAGMENT) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
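
/*
 * Fast symlinks keep their target string inside the on-disk inode body
 * (i_u1.i_symlink), which is why an S_ISLNK inode with i_blocks == 0 gets
 * i_link pointed straight at that buffer and needs no address_space ops.
 */
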
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;

	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
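
/*
 * free_data() batches frees: as long as successive calls pass physically
 * contiguous runs (from == previous ctx->to) the fragments are merged into
 * one pending extent; a gap, or the final free_data(&ctx, 0, 0) flush,
 * releases the accumulated run with a single ufs_free_blocks() call.
 */
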
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
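
/*
 * Example (illustrative): with uspi->s_bsize == 8192 and uspi->s_fsize == 1024,
 * an inode with i_size == 10000 gives DIRECT_BLOCK == 2 (blocks 0 and 1 fully
 * in use) and DIRECT_FRAGMENT == 10 (fragments 0..9 in use), i.e. both macros
 * round i_size up to whole allocation units.
 */
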
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
 next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}
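
/*
 * Summary of the frag1..frag4 bookkeeping above: frag1 is the first fragment
 * to discard, frag4 the old end of the direct area; frag2/frag3 round that
 * range inward to whole-block boundaries.  The partial block at the front is
 * trimmed with ufs_free_fragments(), the whole blocks in between go through
 * free_data(), and the partial block at the tail is freed last.
 */
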
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}
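
/*
 * free_full_branch() releases an entire indirect subtree including the
 * indirect block itself, while free_branch_tail() only clears entries from
 * index @from onwards in a block that stays partially populated, which is
 * why the latter keeps the ufs_buffer_head dirty/synced instead of
 * forgetting it.
 */
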
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * we do not zeroize fragment, because of
		 * if it maped to hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++;	/* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--) {
			ubh_mark_buffer_dirty(ubh[i]);
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
		}
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};