/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
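
/*
 * Worked example of the mapping above (illustrative numbers, assuming a
 * UFS1 layout with 8KiB blocks and 32-bit pointers, i.e. s_apb = 2048
 * and s_apbshift = 11): i_block = 5000 lies past the UFS_NDADDR = 12
 * direct slots and past the 2048 singly-indirect slots, so it falls in
 * the double-indirect range and the function fills in
 *
 *	offsets[0] = UFS_DIND_BLOCK;
 *	offsets[1] = (5000 - 12 - 2048) >> 11;		(= 1)
 *	offsets[2] = (5000 - 12 - 2048) & 2047;		(= 892)
 *
 * and returns a depth of 3.
 */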

typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
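
/*
 * grow_chain32()/grow_chain64() are the read side of ->meta_lock: they
 * sample one more pointer under read_seqbegin()/read_seqretry() and then
 * re-check every key cached so far against the pointer it was read from.
 * The generic shape of the idiom is (a sketch, not UFS-specific):
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&lock);
 *		snapshot = *shared_pointer;
 *	} while (read_seqretry(&lock, seq));
 *
 * A zero return from the helpers means some pointer on the chain changed
 * under us (e.g. a racing truncate), and ufs_frag_map() restarts from
 * the top.
 */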

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with a partial final block and
 * we have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}
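
/*
 * Illustrative numbers for the sizing above (assuming s_fpb = 8, hence
 * s_fpbmask = 7): with i_lastfrag = 21 the tail holds 21 & 7 = 5
 * fragments.  A write to fragment 22 stays inside the tail's block
 * (22 < (21 | 7) = 23), so new_size = (22 & 7) + 1 = 7 and only
 * 7 - 5 = 2 extra fragments are requested.  A write to fragment 40 is
 * past that block, so new_size = s_fpb = 8 and the tail is unpacked to
 * a full block with a request for 3 more fragments.
 */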

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: we set it if something goes wrong
 * @new: we set it if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}
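
/*
 * Indexing note for ufs_inode_getblock() (illustrative figures, assuming
 * UFS1 with s_apb = 2048 and s_fpb = 8, so shift = 8): an indirect block
 * spans 8 fragments holding 256 pointers each, so index = 892 reads the
 * fragment at ind_block + (892 >> 8) = ind_block + 3 and then uses
 * pointer 892 & 255 = 124 within that fragment's buffer.
 */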

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	if (!create) {
		/* read-side lookup: no allocation, ->truncate_mutex not needed */
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64)
			map_bh(bh_result, sb, phys64 + frag);
		return 0;
	}

	/* From here on we are writing, so fragments may need to be allocated. */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;
}
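
/*
 * The function above is only ever called through the generic buffer
 * layer.  A rough sketch of how block_read_full_page() and friends
 * consume a get_block_t like this one (not a real caller, just the
 * contract):
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *	int err = ufs_getfrag_block(inode, fragment, &bh, 0);
 *	if (!err && buffer_mapped(&bh))
 *		use(bh.b_blocknr);	// the on-disk fragment number
 *
 * With create == 0 the call only maps existing fragments; with
 * create == 1 missing fragments are allocated and buffer_new() is set
 * on freshly allocated ones so the caller can zero them appropriately.
 */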

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
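
/*
 * free_data() batches physically contiguous runs so that
 * ufs_free_blocks() is called once per extent rather than once per
 * block.  Illustrative trace (s_fpb = 8): calls with from = 64, 72 and
 * 80 simply accumulate count = 24; a later call with from = 200 first
 * flushes the pending run as ufs_free_blocks(inode, 64, 24) and starts
 * a new one; and the conventional terminating call free_data(&ctx, 0, 0)
 * flushes whatever is still pending.
 */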

#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free the trailing fragments of the boundary block, i.e. the
	 * part of that block beyond the new last fragment
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free the tail: the fragments at the front of the last block
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}
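
/*
 * Boundary example for the four cut points above (illustrative,
 * s_fpb = 8): truncating so that DIRECT_FRAGMENT = 21 while
 * i_lastfrag = 53 yields
 *
 *	frag1 = 21, frag2 = 24	-> free the 3 fragments 21..23
 *	block1 = 3, block2 = 6	-> free whole blocks covering 24..47
 *	frag3 = 48, frag4 = 53	-> free the 5-fragment tail 48..52
 *
 * If frag1 is already block-aligned the head piece is skipped, and if
 * frag2 > frag3 everything collapses into one partial range.
 */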

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;


	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zero the fragment: if it was mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++;	/* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}
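
/*
 * Example of the partial-branch walk above (illustrative, continuing
 * the i_block = 5000 example): offsets[] = { UFS_DIND_BLOCK, 1, 892 }
 * and depth2 = 2.  The loop bumps offsets[0] so the final loop clears
 * the triple-indirect tree, reads ubh[0] (the double-indirect root,
 * whose branches from slot 2 onward are fully freed) and ubh[1] (the
 * second-level block, whose data pointers from slot 892 onward are
 * freed), then free_branch_tail() unwinds them innermost-first.
 */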

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};