fs/reiserfs/inode.c
1 /*
2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3 */
4
5 #include <linux/time.h>
6 #include <linux/fs.h>
7 #include "reiserfs.h"
8 #include "acl.h"
9 #include "xattr.h"
10 #include <linux/exportfs.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/slab.h>
14 #include <linux/uaccess.h>
15 #include <asm/unaligned.h>
16 #include <linux/buffer_head.h>
17 #include <linux/mpage.h>
18 #include <linux/writeback.h>
19 #include <linux/quotaops.h>
20 #include <linux/swap.h>
21 #include <linux/uio.h>
22 #include <linux/bio.h>
23
24 int reiserfs_commit_write(struct file *f, struct page *page,
25 unsigned from, unsigned to);
26
27 void reiserfs_evict_inode(struct inode *inode)
28 {
29 /*
30 * We need blocks for transaction + (user+group) quota
31 * update (possibly delete)
32 */
33 int jbegin_count =
34 JOURNAL_PER_BALANCE_CNT * 2 +
35 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
36 struct reiserfs_transaction_handle th;
37 int err;
38
39 if (!inode->i_nlink && !is_bad_inode(inode))
40 dquot_initialize(inode);
41
42 truncate_inode_pages_final(&inode->i_data);
43 if (inode->i_nlink)
44 goto no_delete;
45
46 /*
47 * The objectid == 0 case happens when we abort creating a new
48 * inode for some reason, such as lack of space.
49 * This also handles the bad_inode case.
50 */
51 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {
52
53 reiserfs_delete_xattrs(inode);
54
55 reiserfs_write_lock(inode->i_sb);
56
57 if (journal_begin(&th, inode->i_sb, jbegin_count))
58 goto out;
59 reiserfs_update_inode_transaction(inode);
60
61 reiserfs_discard_prealloc(&th, inode);
62
63 err = reiserfs_delete_object(&th, inode);
64
65 /*
66 * Do quota update inside a transaction for journaled quotas.
67 * We must do that after delete_object so that quota updates
68 * go into the same transaction as stat data deletion
69 */
70 if (!err) {
71 int depth = reiserfs_write_unlock_nested(inode->i_sb);
72 dquot_free_inode(inode);
73 reiserfs_write_lock_nested(inode->i_sb, depth);
74 }
75
76 if (journal_end(&th))
77 goto out;
78
79 /*
80 * check return value from reiserfs_delete_object after
81 * ending the transaction
82 */
83 if (err)
84 goto out;
85
86 /*
87 * all items of file are deleted, so we can remove
88 * "save" link
89 * we can't do anything about an error here
90 */
91 remove_save_link(inode, 0 /* not truncate */);
92 out:
93 reiserfs_write_unlock(inode->i_sb);
94 } else {
95 /* no object items are in the tree */
96 ;
97 }
98
99 /* note this must go after the journal_end to prevent deadlock */
100 clear_inode(inode);
101
102 dquot_drop(inode);
103 inode->i_blocks = 0;
104 return;
105
106 no_delete:
107 clear_inode(inode);
108 dquot_drop(inode);
109 }
110
111 static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
112 __u32 objectid, loff_t offset, int type, int length)
113 {
114 key->version = version;
115
116 key->on_disk_key.k_dir_id = dirid;
117 key->on_disk_key.k_objectid = objectid;
118 set_cpu_key_k_offset(key, offset);
119 set_cpu_key_k_type(key, type);
120 key->key_length = length;
121 }
122
123 /*
124 * take the base of the key (dirid, objectid), which always comes from the
125 * inode, and the version from the inode; then set the offset and type of the key
126 */
127 void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
128 int type, int length)
129 {
130 _make_cpu_key(key, get_inode_item_key_version(inode),
131 le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
132 le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
133 length);
134 }
135
136 /* when key is NULL, do not set the short key (dirid, objectid) */
137 inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
138 int version,
139 loff_t offset, int type, int length,
140 int entry_count /*or ih_free_space */ )
141 {
142 if (key) {
143 ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
144 ih->ih_key.k_objectid =
145 cpu_to_le32(key->on_disk_key.k_objectid);
146 }
147 put_ih_version(ih, version);
148 set_le_ih_k_offset(ih, offset);
149 set_le_ih_k_type(ih, type);
150 put_ih_item_len(ih, length);
151 /* set_ih_free_space (ih, 0); */
152 /*
153 * for directory items it is entry count, for directs and stat
154 * datas - 0xffff, for indirects - 0
155 */
156 put_ih_entry_count(ih, entry_count);
157 }
158
159 /*
160 * FIXME: we might cache recently accessed indirect item
161 * Ugh. Not too eager for that....
162 * I cut the code until such time as I see a convincing argument (benchmark).
163 * I don't want a bloated inode struct..., and I don't like code complexity....
164 */
165
166 /*
167 * cutting the code is fine, since it really isn't in use yet and is easy
168 * to add back in. But, Vladimir has a really good idea here. Think
169 * about what happens for reading a file. For each page,
170 * The VFS layer calls reiserfs_readpage, who searches the tree to find
171 * an indirect item. This indirect item has X number of pointers, where
172 * X is a big number if we've done the block allocation right. But,
173 * we only use one or two of these pointers during each call to readpage,
174 * needlessly researching again later on.
175 *
176 * The size of the cache could be dynamic based on the size of the file.
177 *
178 * I'd also like to see us cache the location of the stat data item, since
179 * we are needlessly researching for that frequently.
180 *
181 * --chris
182 */
183
184 /*
185 * If this page has a file tail in it, and
186 * it was read in by get_block_create_0, the page data is valid,
187 * but the tail is still sitting in a direct item, and we can't write to
188 * it. So, look through this page, and check all the mapped buffers
189 * to make sure they have valid block numbers. Any that don't must
190 * be unmapped, so that __block_write_begin will correctly call
191 * reiserfs_get_block to convert the tail into an unformatted node
192 */
193 static inline void fix_tail_page_for_writing(struct page *page)
194 {
195 struct buffer_head *head, *next, *bh;
196
197 if (page && page_has_buffers(page)) {
198 head = page_buffers(page);
199 bh = head;
200 do {
201 next = bh->b_this_page;
202 if (buffer_mapped(bh) && bh->b_blocknr == 0) {
203 reiserfs_unmap_buffer(bh);
204 }
205 bh = next;
206 } while (bh != head);
207 }
208 }
209
210 /*
211 * reiserfs_get_block does not need to allocate a block only if the block has
212 * already been allocated or a non-hole position has been found in the indirect item
213 */
214 static inline int allocation_needed(int retval, b_blocknr_t allocated,
215 struct item_head *ih,
216 __le32 * item, int pos_in_item)
217 {
218 if (allocated)
219 return 0;
220 if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
221 get_block_num(item, pos_in_item))
222 return 0;
223 return 1;
224 }
225
226 static inline int indirect_item_found(int retval, struct item_head *ih)
227 {
228 return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
229 }
230
231 static inline void set_block_dev_mapped(struct buffer_head *bh,
232 b_blocknr_t block, struct inode *inode)
233 {
234 map_bh(bh, inode->i_sb, block);
235 }
236
237 /*
238 * files which were created in the earlier (3.5) format cannot be larger
239 * than 2 GB
240 */
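/*
 * For example (derived from the check below): with the common 4 KiB block
 * size, s_blocksize_bits == 12, so 'block' must be below 1 << 19 == 524288;
 * 524288 blocks of 4 KiB is exactly 2 GiB of file data.
 */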
241 static int file_capable(struct inode *inode, sector_t block)
242 {
243 /* it is a new-format file. */
244 if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
245 /* old file, but 'block' is within the 2 GB limit */
246 block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
247 return 1;
248
249 return 0;
250 }
251
252 static int restart_transaction(struct reiserfs_transaction_handle *th,
253 struct inode *inode, struct treepath *path)
254 {
255 struct super_block *s = th->t_super;
256 int err;
257
258 BUG_ON(!th->t_trans_id);
259 BUG_ON(!th->t_refcount);
260
261 pathrelse(path);
262
263 /* we cannot restart while nested */
264 if (th->t_refcount > 1) {
265 return 0;
266 }
267 reiserfs_update_sd(th, inode);
268 err = journal_end(th);
269 if (!err) {
270 err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
271 if (!err)
272 reiserfs_update_inode_transaction(inode);
273 }
274 return err;
275 }
276
277 /*
278 * Called by reiserfs_get_block when create == 0. Returns the block
279 * number for the 'block'-th logical block of the file. When the
280 * position falls into a direct item, it either returns 0 (when called
281 * from bmap) or reads the direct item into the given piece of the
282 * page (bh_result).
283 */
285 static int _get_block_create_0(struct inode *inode, sector_t block,
286 struct buffer_head *bh_result, int args)
287 {
288 INITIALIZE_PATH(path);
289 struct cpu_key key;
290 struct buffer_head *bh;
291 struct item_head *ih, tmp_ih;
292 b_blocknr_t blocknr;
293 char *p = NULL;
294 int chars;
295 int ret;
296 int result;
297 int done = 0;
298 unsigned long offset;
299
300 /* prepare the key to look for the 'block'-th block of file */
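/*
 * reiserfs key offsets are 1-based byte offsets: byte N of the file has
 * key offset N + 1, which is why the first byte of the 'block'-th block
 * is addressed as block * s_blocksize + 1 below.
 */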
301 make_cpu_key(&key, inode,
302 (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
303 3);
304
305 result = search_for_position_by_key(inode->i_sb, &key, &path);
306 if (result != POSITION_FOUND) {
307 pathrelse(&path);
308 if (p)
309 kunmap(bh_result->b_page);
310 if (result == IO_ERROR)
311 return -EIO;
312 /*
313 * We do not return -ENOENT if there is a hole but page is
314 * uptodate, because it means that there is some MMAPED data
315 * associated with it that is yet to be written to disk.
316 */
317 if ((args & GET_BLOCK_NO_HOLE)
318 && !PageUptodate(bh_result->b_page)) {
319 return -ENOENT;
320 }
321 return 0;
322 }
323
324 bh = get_last_bh(&path);
325 ih = tp_item_head(&path);
326 if (is_indirect_le_ih(ih)) {
327 __le32 *ind_item = (__le32 *) ih_item_body(bh, ih);
328
329 /*
330 * FIXME: here we could cache indirect item or part of it in
331 * the inode to avoid search_by_key in case of subsequent
332 * access to file
333 */
334 blocknr = get_block_num(ind_item, path.pos_in_item);
335 ret = 0;
336 if (blocknr) {
337 map_bh(bh_result, inode->i_sb, blocknr);
338 if (path.pos_in_item ==
339 ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
340 set_buffer_boundary(bh_result);
341 }
342 } else
343 /*
344 * We do not return -ENOENT if there is a hole but
345 * page is uptodate, because it means that there is
346 * some MMAPED data associated with it that is
347 * yet to be written to disk.
348 */
349 if ((args & GET_BLOCK_NO_HOLE)
350 && !PageUptodate(bh_result->b_page)) {
351 ret = -ENOENT;
352 }
353
354 pathrelse(&path);
355 if (p)
356 kunmap(bh_result->b_page);
357 return ret;
358 }
359 /* requested data are in direct item(s) */
360 if (!(args & GET_BLOCK_READ_DIRECT)) {
361 /*
362 * we are called by bmap. FIXME: we cannot map a block of the file
363 * when it is stored in direct item(s)
364 */
365 pathrelse(&path);
366 if (p)
367 kunmap(bh_result->b_page);
368 return -ENOENT;
369 }
370
371 /*
372 * if we've got a direct item, and the buffer or page was uptodate,
373 * we don't want to pull data off disk again. skip to the
374 * end, where we map the buffer and return
375 */
376 if (buffer_uptodate(bh_result)) {
377 goto finished;
378 } else
379 /*
380 * grab_tail_page can trigger calls to reiserfs_get_block on
381 * up to date pages without any buffers. If the page is up
382 * to date, we don't want to read old data off disk. Set the up
383 * to date bit on the buffer instead and jump to the end
384 */
385 if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
386 set_buffer_uptodate(bh_result);
387 goto finished;
388 }
389 /* read file tail into part of page */
390 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
391 copy_item_head(&tmp_ih, ih);
392
393 /*
394 * we only want to kmap if we are reading the tail into the page.
395 * this is not the common case, so we don't kmap until we are
396 * sure we need to. But, this means the item might move if
397 * kmap schedules
398 */
399 if (!p)
400 p = (char *)kmap(bh_result->b_page);
401
402 p += offset;
403 memset(p, 0, inode->i_sb->s_blocksize);
404 do {
405 if (!is_direct_le_ih(ih)) {
406 BUG();
407 }
408 /*
409 * make sure we don't read more bytes than actually exist in
410 * the file. This can happen in odd cases where i_size isn't
411 * correct, and when direct item padding results in a few
412 * extra bytes at the end of the direct item
413 */
414 if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
415 break;
416 if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
417 chars =
418 inode->i_size - (le_ih_k_offset(ih) - 1) -
419 path.pos_in_item;
420 done = 1;
421 } else {
422 chars = ih_item_len(ih) - path.pos_in_item;
423 }
424 memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);
425
426 if (done)
427 break;
428
429 p += chars;
430
431 /*
432 * we are done if the direct item we just read is not the last item
433 * of the node. FIXME: we could try to check the right delimiting key
434 * to see whether the direct item continues in the right
435 * neighbor, or rely on i_size
436 */
437 if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
438 break;
439
440 /* update key to look for the next piece */
441 set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
442 result = search_for_position_by_key(inode->i_sb, &key, &path);
443 if (result != POSITION_FOUND)
444 /* i/o error most likely */
445 break;
446 bh = get_last_bh(&path);
447 ih = tp_item_head(&path);
448 } while (1);
449
450 flush_dcache_page(bh_result->b_page);
451 kunmap(bh_result->b_page);
452
453 finished:
454 pathrelse(&path);
455
456 if (result == IO_ERROR)
457 return -EIO;
458
459 /*
460 * this buffer has valid data, but isn't valid for io. mapping it to
461 * block #0 tells the rest of reiserfs it just has a tail in it
462 */
463 map_bh(bh_result, inode->i_sb, 0);
464 set_buffer_uptodate(bh_result);
465 return 0;
466 }
467
468 /*
469 * this is called to create file map. So, _get_block_create_0 will not
470 * read direct item
471 */
472 static int reiserfs_bmap(struct inode *inode, sector_t block,
473 struct buffer_head *bh_result, int create)
474 {
475 if (!file_capable(inode, block))
476 return -EFBIG;
477
478 reiserfs_write_lock(inode->i_sb);
479 /* do not read the direct item */
480 _get_block_create_0(inode, block, bh_result, 0);
481 reiserfs_write_unlock(inode->i_sb);
482 return 0;
483 }
484
485 /*
486 * special version of get_block that is only used by grab_tail_page right
487 * now. It is sent to __block_write_begin, and when you try to get a
488 * block past the end of the file (or a block from a hole) it returns
489 * -ENOENT instead of a valid buffer. __block_write_begin expects to
490 * be able to do i/o on the buffers returned, unless an error value
491 * is also returned.
492 *
493 * So, this allows __block_write_begin to be used for reading a single block
494 * in a page, while not producing a valid page for holes or for blocks past
495 * the end of the file. This turns out to be exactly what we need for reading
496 * tails for conversion.
497 *
498 * The point of the wrapper is forcing a certain value for create, even
499 * though the VFS layer is calling this function with create==1. If you
500 * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
501 * don't use this function.
502 */
503 static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
504 struct buffer_head *bh_result,
505 int create)
506 {
507 return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
508 }
509
510 /*
511 * This is a special helper for reiserfs_get_block for when we are
512 * executing a direct I/O request.
513 */
514 static int reiserfs_get_blocks_direct_io(struct inode *inode,
515 sector_t iblock,
516 struct buffer_head *bh_result,
517 int create)
518 {
519 int ret;
520
521 bh_result->b_page = NULL;
522
523 /*
524 * We set the b_size before reiserfs_get_block call since it is
525 * referenced in convert_tail_for_hole() that may be called from
526 * reiserfs_get_block()
527 */
528 bh_result->b_size = (1 << inode->i_blkbits);
529
530 ret = reiserfs_get_block(inode, iblock, bh_result,
531 create | GET_BLOCK_NO_DANGLE);
532 if (ret)
533 goto out;
534
535 /* don't allow direct io onto tail pages */
536 if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
537 /*
538 * make sure future calls to the direct io funcs for this
539 * offset in the file fail by unmapping the buffer
540 */
541 clear_buffer_mapped(bh_result);
542 ret = -EINVAL;
543 }
544
545 /*
546 * Possible unpacked tail. Flush the data before pages have
547 * disappeared
548 */
549 if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
550 int err;
551
552 reiserfs_write_lock(inode->i_sb);
553
554 err = reiserfs_commit_for_inode(inode);
555 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
556
557 reiserfs_write_unlock(inode->i_sb);
558
559 if (err < 0)
560 ret = err;
561 }
562 out:
563 return ret;
564 }
565
566 /*
567 * helper function for when reiserfs_get_block is called for a hole
568 * but the file tail is still in a direct item
569 * bh_result is the buffer head for the hole
570 * tail_offset is the offset of the start of the tail in the file
571 *
572 * This calls prepare_write, which will start a new transaction
573 * you should not be in a transaction, or have any paths held when you
574 * call this.
575 */
576 static int convert_tail_for_hole(struct inode *inode,
577 struct buffer_head *bh_result,
578 loff_t tail_offset)
579 {
580 unsigned long index;
581 unsigned long tail_end;
582 unsigned long tail_start;
583 struct page *tail_page;
584 struct page *hole_page = bh_result->b_page;
585 int retval = 0;
586
587 if ((tail_offset & (bh_result->b_size - 1)) != 1)
588 return -EIO;
589
590 /* always try to read until the end of the block */
591 tail_start = tail_offset & (PAGE_SIZE - 1);
592 tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
593
594 index = tail_offset >> PAGE_SHIFT;
595 /*
596 * hole_page can be zero in case of direct_io, we are sure
597 * that we cannot get here if we write with O_DIRECT into tail page
598 */
599 if (!hole_page || index != hole_page->index) {
600 tail_page = grab_cache_page(inode->i_mapping, index);
601 retval = -ENOMEM;
602 if (!tail_page) {
603 goto out;
604 }
605 } else {
606 tail_page = hole_page;
607 }
608
609 /*
610 * we don't have to make sure the conversion did not happen while
611 * we were locking the page because anyone that could convert
612 * must first take i_mutex.
613 *
614 * We must fix the tail page for writing because it might have buffers
615 * that are mapped, but have a block number of 0. This indicates tail
616 * data that has been read directly into the page, and
617 * __block_write_begin won't trigger a get_block in this case.
618 */
619 fix_tail_page_for_writing(tail_page);
620 retval = __reiserfs_write_begin(tail_page, tail_start,
621 tail_end - tail_start);
622 if (retval)
623 goto unlock;
624
625 /* tail conversion might change the data in the page */
626 flush_dcache_page(tail_page);
627
628 retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);
629
630 unlock:
631 if (tail_page != hole_page) {
632 unlock_page(tail_page);
633 put_page(tail_page);
634 }
635 out:
636 return retval;
637 }
638
639 static inline int _allocate_block(struct reiserfs_transaction_handle *th,
640 sector_t block,
641 struct inode *inode,
642 b_blocknr_t * allocated_block_nr,
643 struct treepath *path, int flags)
644 {
645 BUG_ON(!th->t_trans_id);
646
647 #ifdef REISERFS_PREALLOCATE
648 if (!(flags & GET_BLOCK_NO_IMUX)) {
649 return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
650 path, block);
651 }
652 #endif
653 return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
654 block);
655 }
656
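/*
 * Note on the 'create' argument: it is a bitmask, not a boolean. As used
 * in this file, GET_BLOCK_CREATE requests allocation, GET_BLOCK_NO_HOLE
 * makes holes return -ENOENT, GET_BLOCK_READ_DIRECT allows the read path
 * to copy a tail stored in a direct item into the page, GET_BLOCK_NO_DANGLE
 * forbids returning with a transaction still running, and GET_BLOCK_NO_IMUX
 * (checked in _allocate_block) skips the preallocation path.
 */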
657 int reiserfs_get_block(struct inode *inode, sector_t block,
658 struct buffer_head *bh_result, int create)
659 {
660 int repeat, retval = 0;
661 /* b_blocknr_t is (unsigned) 32 bit int*/
662 b_blocknr_t allocated_block_nr = 0;
663 INITIALIZE_PATH(path);
664 int pos_in_item;
665 struct cpu_key key;
666 struct buffer_head *bh, *unbh = NULL;
667 struct item_head *ih, tmp_ih;
668 __le32 *item;
669 int done;
670 int fs_gen;
671 struct reiserfs_transaction_handle *th = NULL;
672 /*
673 * space reserved in transaction batch:
674 * . 3 balancings in direct->indirect conversion
675 * . 1 block involved into reiserfs_update_sd()
676 * XXX in practically impossible worst case direct2indirect()
677 * can incur (much) more than 3 balancings.
678 * quota update for user, group
679 */
680 int jbegin_count =
681 JOURNAL_PER_BALANCE_CNT * 3 + 1 +
682 2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
683 int version;
684 int dangle = 1;
685 loff_t new_offset =
686 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
687
688 reiserfs_write_lock(inode->i_sb);
689 version = get_inode_item_key_version(inode);
690
691 if (!file_capable(inode, block)) {
692 reiserfs_write_unlock(inode->i_sb);
693 return -EFBIG;
694 }
695
696 /*
697 * if !create, we aren't changing the FS, so we don't need to
698 * log anything, so we don't need to start a transaction
699 */
700 if (!(create & GET_BLOCK_CREATE)) {
701 int ret;
702 /* find number of block-th logical block of the file */
703 ret = _get_block_create_0(inode, block, bh_result,
704 create | GET_BLOCK_READ_DIRECT);
705 reiserfs_write_unlock(inode->i_sb);
706 return ret;
707 }
708
709 /*
710 * if we're already in a transaction, make sure to close
711 * any new transactions we start in this func
712 */
713 if ((create & GET_BLOCK_NO_DANGLE) ||
714 reiserfs_transaction_running(inode->i_sb))
715 dangle = 0;
716
717 /*
718 * If the file is small enough that it might have a tail, and
719 * tails are enabled, we should mark it as possibly needing
720 * tail packing on close
721 */
722 if ((have_large_tails(inode->i_sb)
723 && inode->i_size < i_block_size(inode) * 4)
724 || (have_small_tails(inode->i_sb)
725 && inode->i_size < i_block_size(inode)))
726 REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
727
728 /* set the key of the first byte in the 'block'-th block of file */
729 make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
730 if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
731 start_trans:
732 th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
733 if (!th) {
734 retval = -ENOMEM;
735 goto failure;
736 }
737 reiserfs_update_inode_transaction(inode);
738 }
739 research:
740
741 retval = search_for_position_by_key(inode->i_sb, &key, &path);
742 if (retval == IO_ERROR) {
743 retval = -EIO;
744 goto failure;
745 }
746
747 bh = get_last_bh(&path);
748 ih = tp_item_head(&path);
749 item = tp_item_body(&path);
750 pos_in_item = path.pos_in_item;
751
752 fs_gen = get_generation(inode->i_sb);
753 copy_item_head(&tmp_ih, ih);
754
755 if (allocation_needed
756 (retval, allocated_block_nr, ih, item, pos_in_item)) {
757 /* we have to allocate block for the unformatted node */
758 if (!th) {
759 pathrelse(&path);
760 goto start_trans;
761 }
762
763 repeat =
764 _allocate_block(th, block, inode, &allocated_block_nr,
765 &path, create);
766
767 /*
768 * restart the transaction to give the journal a chance to free
769 * some blocks. This releases the path, so we have to go back to
770 * research if we succeed on the second try
771 */
772 if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
773 SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
774 retval = restart_transaction(th, inode, &path);
775 if (retval)
776 goto failure;
777 repeat =
778 _allocate_block(th, block, inode,
779 &allocated_block_nr, NULL, create);
780
781 if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
782 goto research;
783 }
784 if (repeat == QUOTA_EXCEEDED)
785 retval = -EDQUOT;
786 else
787 retval = -ENOSPC;
788 goto failure;
789 }
790
791 if (fs_changed(fs_gen, inode->i_sb)
792 && item_moved(&tmp_ih, &path)) {
793 goto research;
794 }
795 }
796
797 if (indirect_item_found(retval, ih)) {
798 b_blocknr_t unfm_ptr;
799 /*
800 * 'block'-th block is in the file already (there is
801 * corresponding cell in some indirect item). But it may be
802 * zero unformatted node pointer (hole)
803 */
804 unfm_ptr = get_block_num(item, pos_in_item);
805 if (unfm_ptr == 0) {
806 /* use allocated block to plug the hole */
807 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
808 if (fs_changed(fs_gen, inode->i_sb)
809 && item_moved(&tmp_ih, &path)) {
810 reiserfs_restore_prepared_buffer(inode->i_sb,
811 bh);
812 goto research;
813 }
814 set_buffer_new(bh_result);
815 if (buffer_dirty(bh_result)
816 && reiserfs_data_ordered(inode->i_sb))
817 reiserfs_add_ordered_list(inode, bh_result);
818 put_block_num(item, pos_in_item, allocated_block_nr);
819 unfm_ptr = allocated_block_nr;
820 journal_mark_dirty(th, bh);
821 reiserfs_update_sd(th, inode);
822 }
823 set_block_dev_mapped(bh_result, unfm_ptr, inode);
824 pathrelse(&path);
825 retval = 0;
826 if (!dangle && th)
827 retval = reiserfs_end_persistent_transaction(th);
828
829 reiserfs_write_unlock(inode->i_sb);
830
831 /*
832 * the item was found, so new blocks were not added to the file;
833 * there is no need to make sure the inode is updated with this
834 * transaction
835 */
836 return retval;
837 }
838
839 if (!th) {
840 pathrelse(&path);
841 goto start_trans;
842 }
843
844 /*
845 * desired position is not found or is in the direct item. We have
846 * to append the file with holes up to the 'block'-th block,
847 * converting direct items to indirect ones if necessary
848 */
849 done = 0;
850 do {
851 if (is_statdata_le_ih(ih)) {
852 __le32 unp = 0;
853 struct cpu_key tmp_key;
854
855 /* indirect item has to be inserted */
856 make_le_item_head(&tmp_ih, &key, version, 1,
857 TYPE_INDIRECT, UNFM_P_SIZE,
858 0 /* free_space */ );
859
860 /*
861 * we are going to add 'block'-th block to the file.
862 * Use allocated block for that
863 */
864 if (cpu_key_k_offset(&key) == 1) {
865 unp = cpu_to_le32(allocated_block_nr);
866 set_block_dev_mapped(bh_result,
867 allocated_block_nr, inode);
868 set_buffer_new(bh_result);
869 done = 1;
870 }
871 tmp_key = key; /* ;) */
872 set_cpu_key_k_offset(&tmp_key, 1);
873 PATH_LAST_POSITION(&path)++;
874
875 retval =
876 reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
877 inode, (char *)&unp);
878 if (retval) {
879 reiserfs_free_block(th, inode,
880 allocated_block_nr, 1);
881 /*
882 * retval == -ENOSPC, -EDQUOT or -EIO
883 * or -EEXIST
884 */
885 goto failure;
886 }
887 } else if (is_direct_le_ih(ih)) {
888 /* direct item has to be converted */
889 loff_t tail_offset;
890
891 tail_offset =
892 ((le_ih_k_offset(ih) -
893 1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
894
895 /*
896 * direct item we just found fits into block we have
897 * to map. Convert it into unformatted node: use
898 * bh_result for the conversion
899 */
900 if (tail_offset == cpu_key_k_offset(&key)) {
901 set_block_dev_mapped(bh_result,
902 allocated_block_nr, inode);
903 unbh = bh_result;
904 done = 1;
905 } else {
906 /*
907 * we have to pad file tail stored in direct
908 * item(s) up to block size and convert it
909 * to unformatted node. FIXME: this should
910 * also get into page cache
911 */
912
913 pathrelse(&path);
914 /*
915 * ugly, but we can only end the transaction if
916 * we aren't nested
917 */
918 BUG_ON(!th->t_refcount);
919 if (th->t_refcount == 1) {
920 retval =
921 reiserfs_end_persistent_transaction
922 (th);
923 th = NULL;
924 if (retval)
925 goto failure;
926 }
927
928 retval =
929 convert_tail_for_hole(inode, bh_result,
930 tail_offset);
931 if (retval) {
932 if (retval != -ENOSPC)
933 reiserfs_error(inode->i_sb,
934 "clm-6004",
935 "convert tail failed "
936 "inode %lu, error %d",
937 inode->i_ino,
938 retval);
939 if (allocated_block_nr) {
940 /*
941 * the bitmap, the super,
942 * and the stat data == 3
943 */
944 if (!th)
945 th = reiserfs_persistent_transaction(inode->i_sb, 3);
946 if (th)
947 reiserfs_free_block(th,
948 inode,
949 allocated_block_nr,
950 1);
951 }
952 goto failure;
953 }
954 goto research;
955 }
956 retval =
957 direct2indirect(th, inode, &path, unbh,
958 tail_offset);
959 if (retval) {
960 reiserfs_unmap_buffer(unbh);
961 reiserfs_free_block(th, inode,
962 allocated_block_nr, 1);
963 goto failure;
964 }
965 /*
966 * it is important the set_buffer_uptodate is done
967 * after the direct2indirect. The buffer might
968 * contain valid data newer than the data on disk
969 * (read by readpage, changed, and then sent here by
970 * writepage). direct2indirect needs to know if unbh
971 * was already up to date, so it can decide if the
972 * data in unbh needs to be replaced with data from
973 * the disk
974 */
975 set_buffer_uptodate(unbh);
976
977 /*
978 * unbh->b_page == NULL in case of a DIRECT_IO request;
979 * this means the buffer will disappear shortly, so it
980 * should not be added to the tail list
981 */
982 if (unbh->b_page) {
983 /*
984 * we've converted the tail, so we must
985 * flush unbh before the transaction commits
986 */
987 reiserfs_add_tail_list(inode, unbh);
988
989 /*
990 * mark it dirty now to prevent commit_write
991 * from adding this buffer to the inode's
992 * dirty buffer list
993 */
994 /*
995 * AKPM: changed __mark_buffer_dirty to
996 * mark_buffer_dirty(). It's still atomic,
997 * but it sets the page dirty too, which makes
998 * it eligible for writeback at any time by the
999 * VM (which was also the case with
1000 * __mark_buffer_dirty())
1001 */
1002 mark_buffer_dirty(unbh);
1003 }
1004 } else {
1005 /*
1006 * append indirect item with holes if needed, when
1007 * appending pointer to 'block'-th block use block,
1008 * which is already allocated
1009 */
1010 struct cpu_key tmp_key;
1011 /*
1012 * We use this in case we need to allocate
1013 * only one block which is a fastpath
1014 */
1015 unp_t unf_single = 0;
1016 unp_t *un;
1017 __u64 max_to_insert =
1018 MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
1019 UNFM_P_SIZE;
1020 __u64 blocks_needed;
1021
1022 RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
1023 "vs-804: invalid position for append");
1024 /*
1025 * indirect item has to be appended,
1026 * set up key of that position
1027 * (key type is unimportant)
1028 */
1029 make_cpu_key(&tmp_key, inode,
1030 le_key_k_offset(version,
1031 &ih->ih_key) +
1032 op_bytes_number(ih,
1033 inode->i_sb->s_blocksize),
1034 TYPE_INDIRECT, 3);
1035
1036 RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
1037 "green-805: invalid offset");
1038 blocks_needed =
1039 1 +
1040 ((cpu_key_k_offset(&key) -
1041 cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
1042 s_blocksize_bits);
1043
1044 if (blocks_needed == 1) {
1045 un = &unf_single;
1046 } else {
1047 un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
1048 if (!un) {
1049 un = &unf_single;
1050 blocks_needed = 1;
1051 max_to_insert = 0;
1052 }
1053 }
1054 if (blocks_needed <= max_to_insert) {
1055 /*
1056 * we are going to add target block to
1057 * the file. Use allocated block for that
1058 */
1059 un[blocks_needed - 1] =
1060 cpu_to_le32(allocated_block_nr);
1061 set_block_dev_mapped(bh_result,
1062 allocated_block_nr, inode);
1063 set_buffer_new(bh_result);
1064 done = 1;
1065 } else {
1066 /* paste hole to the indirect item */
1067 /*
1068 * If kzalloc failed, max_to_insert becomes
1069 * zero and it means we only have space for
1070 * one block
1071 */
1072 blocks_needed =
1073 max_to_insert ? max_to_insert : 1;
1074 }
1075 retval =
1076 reiserfs_paste_into_item(th, &path, &tmp_key, inode,
1077 (char *)un,
1078 UNFM_P_SIZE *
1079 blocks_needed);
1080
1081 if (blocks_needed != 1)
1082 kfree(un);
1083
1084 if (retval) {
1085 reiserfs_free_block(th, inode,
1086 allocated_block_nr, 1);
1087 goto failure;
1088 }
1089 if (!done) {
1090 /*
1091 * We need to mark new file size in case
1092 * this function will be interrupted/aborted
1093 * later on. And we may do this only for
1094 * holes.
1095 */
1096 inode->i_size +=
1097 inode->i_sb->s_blocksize * blocks_needed;
1098 }
1099 }
1100
1101 if (done == 1)
1102 break;
1103
1104 /*
1105 * this loop could log more blocks than we had originally
1106 * asked for. So, we have to allow the transaction to end
1107 * if it is too big or too full. Update the inode so things
1108 * are consistent if we crash before the function returns.
1109 * Release the path before ending the transaction so that anybody
1110 * waiting on the path will be able to continue.
1111 */
1112 if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
1113 retval = restart_transaction(th, inode, &path);
1114 if (retval)
1115 goto failure;
1116 }
1117 /*
1118 * inserting indirect pointers for a hole can take a
1119 * long time. reschedule if needed and also release the write
1120 * lock for others.
1121 */
1122 reiserfs_cond_resched(inode->i_sb);
1123
1124 retval = search_for_position_by_key(inode->i_sb, &key, &path);
1125 if (retval == IO_ERROR) {
1126 retval = -EIO;
1127 goto failure;
1128 }
1129 if (retval == POSITION_FOUND) {
1130 reiserfs_warning(inode->i_sb, "vs-825",
1131 "%K should not be found", &key);
1132 retval = -EEXIST;
1133 if (allocated_block_nr)
1134 reiserfs_free_block(th, inode,
1135 allocated_block_nr, 1);
1136 pathrelse(&path);
1137 goto failure;
1138 }
1139 bh = get_last_bh(&path);
1140 ih = tp_item_head(&path);
1141 item = tp_item_body(&path);
1142 pos_in_item = path.pos_in_item;
1143 } while (1);
1144
1145 retval = 0;
1146
1147 failure:
1148 if (th && (!dangle || (retval && !th->t_trans_id))) {
1149 int err;
1150 if (th->t_trans_id)
1151 reiserfs_update_sd(th, inode);
1152 err = reiserfs_end_persistent_transaction(th);
1153 if (err)
1154 retval = err;
1155 }
1156
1157 reiserfs_write_unlock(inode->i_sb);
1158 reiserfs_check_path(&path);
1159 return retval;
1160 }
1161
1162 static int
1163 reiserfs_readpages(struct file *file, struct address_space *mapping,
1164 struct list_head *pages, unsigned nr_pages)
1165 {
1166 return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
1167 }
1168
1169 /*
1170 * Compute the real number of bytes used by the file.
1171 * The following three functions can go away once we have enough space in
1172 * the stat item
1173 */
1174 static int real_space_diff(struct inode *inode, int sd_size)
1175 {
1176 int bytes;
1177 loff_t blocksize = inode->i_sb->s_blocksize;
1178
1179 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
1180 return sd_size;
1181
1182 /*
1183 * End of file is also in full block with indirect reference, so round
1184 * up to the next block.
1185 *
1186 * there is just no way to know if the tail is actually packed
1187 * on the file, so we have to assume it isn't. When we pack the
1188 * tail, we add 4 bytes to pretend there really is an unformatted
1189 * node pointer
1190 */
1191 bytes =
1192 ((inode->i_size +
1193 (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
1194 sd_size;
1195 return bytes;
1196 }
1197
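/*
 * The block counts handled below (the 'blocks' argument and
 * inode->i_blocks) are in 512-byte sectors, hence the shifts by 9 when
 * converting to and from byte counts.
 */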
1198 static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
1199 int sd_size)
1200 {
1201 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1202 return inode->i_size +
1203 (loff_t) (real_space_diff(inode, sd_size));
1204 }
1205 return ((loff_t) real_space_diff(inode, sd_size)) +
1206 (((loff_t) blocks) << 9);
1207 }
1208
1209 /* Compute number of blocks used by file in ReiserFS counting */
1210 static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
1211 {
1212 loff_t bytes = inode_get_bytes(inode);
1213 loff_t real_space = real_space_diff(inode, sd_size);
1214
1215 /* keeps fsck and non-quota versions of reiserfs happy */
1216 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1217 bytes += (loff_t) 511;
1218 }
1219
1220 /*
1221 * files from before the quota patch might have i_blocks such that
1222 * bytes < real_space. Deal with that here to prevent it from
1223 * going negative.
1224 */
1225 if (bytes < real_space)
1226 return 0;
1227 return (bytes - real_space) >> 9;
1228 }
1229
1230 /*
1231 * BAD: new directories have stat data of the new type and all other items
1232 * of the old type. The version stored in the inode describes the body items,
1233 * so in update_stat_data we cannot rely on the inode, but have to check the
1234 * item version directly
1235 */
1236
1237 /* called by read_locked_inode */
1238 static void init_inode(struct inode *inode, struct treepath *path)
1239 {
1240 struct buffer_head *bh;
1241 struct item_head *ih;
1242 __u32 rdev;
1243
1244 bh = PATH_PLAST_BUFFER(path);
1245 ih = tp_item_head(path);
1246
1247 copy_key(INODE_PKEY(inode), &ih->ih_key);
1248
1249 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
1250 REISERFS_I(inode)->i_flags = 0;
1251 REISERFS_I(inode)->i_prealloc_block = 0;
1252 REISERFS_I(inode)->i_prealloc_count = 0;
1253 REISERFS_I(inode)->i_trans_id = 0;
1254 REISERFS_I(inode)->i_jl = NULL;
1255 reiserfs_init_xattr_rwsem(inode);
1256
1257 if (stat_data_v1(ih)) {
1258 struct stat_data_v1 *sd =
1259 (struct stat_data_v1 *)ih_item_body(bh, ih);
1260 unsigned long blocks;
1261
1262 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1263 set_inode_sd_version(inode, STAT_DATA_V1);
1264 inode->i_mode = sd_v1_mode(sd);
1265 set_nlink(inode, sd_v1_nlink(sd));
1266 i_uid_write(inode, sd_v1_uid(sd));
1267 i_gid_write(inode, sd_v1_gid(sd));
1268 inode->i_size = sd_v1_size(sd);
1269 inode->i_atime.tv_sec = sd_v1_atime(sd);
1270 inode->i_mtime.tv_sec = sd_v1_mtime(sd);
1271 inode->i_ctime.tv_sec = sd_v1_ctime(sd);
1272 inode->i_atime.tv_nsec = 0;
1273 inode->i_ctime.tv_nsec = 0;
1274 inode->i_mtime.tv_nsec = 0;
1275
1276 inode->i_blocks = sd_v1_blocks(sd);
1277 inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
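/*
 * Upper bound for a sane block count: i_size rounded up to 512-byte
 * sectors, then rounded up to a whole filesystem block's worth of
 * sectors; used below to cap bogus on-disk i_blocks values.
 */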
1278 blocks = (inode->i_size + 511) >> 9;
1279 blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
1280
1281 /*
1282 * there was a bug in <=3.5.23 when i_blocks could take
1283 * negative values. Starting from 3.5.17 this value could
1284 * even be stored in stat data. For such files we set
1285 * i_blocks based on the file size. Two notes: this can be
1286 * wrong for sparse files, and the on-disk value will only be
1287 * updated if the file's inode ever changes
1288 */
1289 if (inode->i_blocks > blocks) {
1290 inode->i_blocks = blocks;
1291 }
1292
1293 rdev = sd_v1_rdev(sd);
1294 REISERFS_I(inode)->i_first_direct_byte =
1295 sd_v1_first_direct_byte(sd);
1296
1297 /*
1298 * an early bug in the quota code can give us an odd
1299 * number for the block count. This is incorrect, fix it here.
1300 */
1301 if (inode->i_blocks & 1) {
1302 inode->i_blocks++;
1303 }
1304 inode_set_bytes(inode,
1305 to_real_used_space(inode, inode->i_blocks,
1306 SD_V1_SIZE));
1307 /*
1308 * nopack is initially zero for v1 objects. For v2 objects,
1309 * nopack is initialised from sd_attrs
1310 */
1311 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
1312 } else {
1313 /*
1314 * new stat data found, but object may have old items
1315 * (directories and symlinks)
1316 */
1317 struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);
1318
1319 inode->i_mode = sd_v2_mode(sd);
1320 set_nlink(inode, sd_v2_nlink(sd));
1321 i_uid_write(inode, sd_v2_uid(sd));
1322 inode->i_size = sd_v2_size(sd);
1323 i_gid_write(inode, sd_v2_gid(sd));
1324 inode->i_mtime.tv_sec = sd_v2_mtime(sd);
1325 inode->i_atime.tv_sec = sd_v2_atime(sd);
1326 inode->i_ctime.tv_sec = sd_v2_ctime(sd);
1327 inode->i_ctime.tv_nsec = 0;
1328 inode->i_mtime.tv_nsec = 0;
1329 inode->i_atime.tv_nsec = 0;
1330 inode->i_blocks = sd_v2_blocks(sd);
1331 rdev = sd_v2_rdev(sd);
1332 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1333 inode->i_generation =
1334 le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1335 else
1336 inode->i_generation = sd_v2_generation(sd);
1337
1338 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1339 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1340 else
1341 set_inode_item_key_version(inode, KEY_FORMAT_3_6);
1342 REISERFS_I(inode)->i_first_direct_byte = 0;
1343 set_inode_sd_version(inode, STAT_DATA_V2);
1344 inode_set_bytes(inode,
1345 to_real_used_space(inode, inode->i_blocks,
1346 SD_V2_SIZE));
1347 /*
1348 * read persistent inode attributes from sd and initialise
1349 * generic inode flags from them
1350 */
1351 REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
1352 sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
1353 }
1354
1355 pathrelse(path);
1356 if (S_ISREG(inode->i_mode)) {
1357 inode->i_op = &reiserfs_file_inode_operations;
1358 inode->i_fop = &reiserfs_file_operations;
1359 inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1360 } else if (S_ISDIR(inode->i_mode)) {
1361 inode->i_op = &reiserfs_dir_inode_operations;
1362 inode->i_fop = &reiserfs_dir_operations;
1363 } else if (S_ISLNK(inode->i_mode)) {
1364 inode->i_op = &reiserfs_symlink_inode_operations;
1365 inode_nohighmem(inode);
1366 inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1367 } else {
1368 inode->i_blocks = 0;
1369 inode->i_op = &reiserfs_special_inode_operations;
1370 init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
1371 }
1372 }
1373
1374 /* update new stat data with inode fields */
1375 static void inode2sd(void *sd, struct inode *inode, loff_t size)
1376 {
1377 struct stat_data *sd_v2 = (struct stat_data *)sd;
1378 __u16 flags;
1379
1380 set_sd_v2_mode(sd_v2, inode->i_mode);
1381 set_sd_v2_nlink(sd_v2, inode->i_nlink);
1382 set_sd_v2_uid(sd_v2, i_uid_read(inode));
1383 set_sd_v2_size(sd_v2, size);
1384 set_sd_v2_gid(sd_v2, i_gid_read(inode));
1385 set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
1386 set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
1387 set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
1388 set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
1389 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1390 set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
1391 else
1392 set_sd_v2_generation(sd_v2, inode->i_generation);
1393 flags = REISERFS_I(inode)->i_attrs;
1394 i_attrs_to_sd_attrs(inode, &flags);
1395 set_sd_v2_attrs(sd_v2, flags);
1396 }
1397
1398 /* used to copy inode's fields to old stat data */
1399 static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
1400 {
1401 struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
1402
1403 set_sd_v1_mode(sd_v1, inode->i_mode);
1404 set_sd_v1_uid(sd_v1, i_uid_read(inode));
1405 set_sd_v1_gid(sd_v1, i_gid_read(inode));
1406 set_sd_v1_nlink(sd_v1, inode->i_nlink);
1407 set_sd_v1_size(sd_v1, size);
1408 set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
1409 set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
1410 set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
1411
1412 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1413 set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
1414 else
1415 set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
1416
1417 /* Sigh. i_first_direct_byte is back */
1418 set_sd_v1_first_direct_byte(sd_v1,
1419 REISERFS_I(inode)->i_first_direct_byte);
1420 }
1421
1422 /*
1423 * NOTE, you must prepare the buffer head before sending it here,
1424 * and then log it after the call
1425 */
1426 static void update_stat_data(struct treepath *path, struct inode *inode,
1427 loff_t size)
1428 {
1429 struct buffer_head *bh;
1430 struct item_head *ih;
1431
1432 bh = PATH_PLAST_BUFFER(path);
1433 ih = tp_item_head(path);
1434
1435 if (!is_statdata_le_ih(ih))
1436 reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
1437 INODE_PKEY(inode), ih);
1438
1439 /* path points to old stat data */
1440 if (stat_data_v1(ih)) {
1441 inode2sd_v1(ih_item_body(bh, ih), inode, size);
1442 } else {
1443 inode2sd(ih_item_body(bh, ih), inode, size);
1444 }
1445
1446 return;
1447 }
1448
1449 void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
1450 struct inode *inode, loff_t size)
1451 {
1452 struct cpu_key key;
1453 INITIALIZE_PATH(path);
1454 struct buffer_head *bh;
1455 int fs_gen;
1456 struct item_head *ih, tmp_ih;
1457 int retval;
1458
1459 BUG_ON(!th->t_trans_id);
1460
1461 /* key type is unimportant */
1462 make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
1463
1464 for (;;) {
1465 int pos;
1466 /* look for the object's stat data */
1467 retval = search_item(inode->i_sb, &key, &path);
1468 if (retval == IO_ERROR) {
1469 reiserfs_error(inode->i_sb, "vs-13050",
1470 "i/o failure occurred trying to "
1471 "update %K stat data", &key);
1472 return;
1473 }
1474 if (retval == ITEM_NOT_FOUND) {
1475 pos = PATH_LAST_POSITION(&path);
1476 pathrelse(&path);
1477 if (inode->i_nlink == 0) {
1478 /*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
1479 return;
1480 }
1481 reiserfs_warning(inode->i_sb, "vs-13060",
1482 "stat data of object %k (nlink == %d) "
1483 "not found (pos %d)",
1484 INODE_PKEY(inode), inode->i_nlink,
1485 pos);
1486 reiserfs_check_path(&path);
1487 return;
1488 }
1489
1490 /*
1491 * sigh, prepare_for_journal might schedule. When it
1492 * schedules the FS might change. We have to detect that,
1493 * and loop back to the search if the stat data item has moved
1494 */
1495 bh = get_last_bh(&path);
1496 ih = tp_item_head(&path);
1497 copy_item_head(&tmp_ih, ih);
1498 fs_gen = get_generation(inode->i_sb);
1499 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
1500
1501 /* Stat_data item has been moved after scheduling. */
1502 if (fs_changed(fs_gen, inode->i_sb)
1503 && item_moved(&tmp_ih, &path)) {
1504 reiserfs_restore_prepared_buffer(inode->i_sb, bh);
1505 continue;
1506 }
1507 break;
1508 }
1509 update_stat_data(&path, inode, size);
1510 journal_mark_dirty(th, bh);
1511 pathrelse(&path);
1512 return;
1513 }
1514
1515 /*
1516 * reiserfs_read_locked_inode is called to read the inode off disk, and it
1517 * does a make_bad_inode when things go wrong. But, we need to make sure
1518 * to clear the key in the private portion of the inode, otherwise a
1519 * corresponding iput might try to delete whatever object the inode last
1520 * represented.
1521 */
1522 static void reiserfs_make_bad_inode(struct inode *inode)
1523 {
1524 memset(INODE_PKEY(inode), 0, KEY_SIZE);
1525 make_bad_inode(inode);
1526 }
1527
1528 /*
1529 * initially this function was derived from minix or ext2's analog and
1530 * evolved as the prototype did
1531 */
1532 int reiserfs_init_locked_inode(struct inode *inode, void *p)
1533 {
1534 struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
1535 inode->i_ino = args->objectid;
1536 INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
1537 return 0;
1538 }
1539
1540 /*
1541 * looks for the stat data in the tree, and fills in the fields of the
1542 * in-core inode from the stat data found
1543 */
1544 void reiserfs_read_locked_inode(struct inode *inode,
1545 struct reiserfs_iget_args *args)
1546 {
1547 INITIALIZE_PATH(path_to_sd);
1548 struct cpu_key key;
1549 unsigned long dirino;
1550 int retval;
1551
1552 dirino = args->dirid;
1553
1554 /*
1555 * set version 1, version 2 could be used too, because stat data
1556 * key is the same in both versions
1557 */
1558 key.version = KEY_FORMAT_3_5;
1559 key.on_disk_key.k_dir_id = dirino;
1560 key.on_disk_key.k_objectid = inode->i_ino;
1561 key.on_disk_key.k_offset = 0;
1562 key.on_disk_key.k_type = 0;
1563
1564 /* look for the object's stat data */
1565 retval = search_item(inode->i_sb, &key, &path_to_sd);
1566 if (retval == IO_ERROR) {
1567 reiserfs_error(inode->i_sb, "vs-13070",
1568 "i/o failure occurred trying to find "
1569 "stat data of %K", &key);
1570 reiserfs_make_bad_inode(inode);
1571 return;
1572 }
1573
1574 /* a stale NFS handle can trigger this without it being an error */
1575 if (retval != ITEM_FOUND) {
1576 pathrelse(&path_to_sd);
1577 reiserfs_make_bad_inode(inode);
1578 clear_nlink(inode);
1579 return;
1580 }
1581
1582 init_inode(inode, &path_to_sd);
1583
1584 /*
1585 * It is possible that knfsd is trying to access inode of a file
1586 * that is being removed from the disk by some other thread. As we
1587 * update sd on unlink all that is required is to check for nlink
1588 * here. This bug was first found by Sizif when debugging
1589 * SquidNG/Butterfly, forgotten, and found again after Philippe
1590 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
1591
1592 * More logical fix would require changes in fs/inode.c:iput() to
1593 * remove inode from hash-table _after_ fs cleaned disk stuff up and
1594 * in iget() to return NULL if I_FREEING inode is found in
1595 * hash-table.
1596 */
1597
1598 /*
1599 * Currently there is one place where it's ok to meet inode with
1600 * nlink==0: processing of open-unlinked and half-truncated files
1601 * during mount (fs/reiserfs/super.c:finish_unfinished()).
1602 */
1603 if ((inode->i_nlink == 0) &&
1604 !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
1605 reiserfs_warning(inode->i_sb, "vs-13075",
1606 "dead inode read from disk %K. "
1607 "This is likely to be race with knfsd. Ignore",
1608 &key);
1609 reiserfs_make_bad_inode(inode);
1610 }
1611
1612 /* init_inode() should have released the path */
1613 reiserfs_check_path(&path_to_sd);
1614
1615 /*
1616 * Stat data v1 doesn't support ACLs.
1617 */
1618 if (get_inode_sd_version(inode) == STAT_DATA_V1)
1619 cache_no_acl(inode);
1620 }
1621
1622 /*
1623 * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
1624 *
1625 * @inode: inode from hash table to check
1626 * @opaque: "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
1627 *
1628 * This function is called by iget5_locked() to distinguish reiserfs inodes
1629 * having the same inode numbers. Such inodes can only exist due to some
1630 * error condition. One of them should be bad. Inodes with identical
1631 * inode numbers (objectids) are distinguished by parent directory ids.
1632 *
1633 */
1634 int reiserfs_find_actor(struct inode *inode, void *opaque)
1635 {
1636 struct reiserfs_iget_args *args;
1637
1638 args = opaque;
1639 /* args is already in CPU order */
1640 return (inode->i_ino == args->objectid) &&
1641 (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
1642 }
1643
1644 struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1645 {
1646 struct inode *inode;
1647 struct reiserfs_iget_args args;
1648 int depth;
1649
1650 args.objectid = key->on_disk_key.k_objectid;
1651 args.dirid = key->on_disk_key.k_dir_id;
1652 depth = reiserfs_write_unlock_nested(s);
1653 inode = iget5_locked(s, key->on_disk_key.k_objectid,
1654 reiserfs_find_actor, reiserfs_init_locked_inode,
1655 (void *)(&args));
1656 reiserfs_write_lock_nested(s, depth);
1657 if (!inode)
1658 return ERR_PTR(-ENOMEM);
1659
1660 if (inode->i_state & I_NEW) {
1661 reiserfs_read_locked_inode(inode, &args);
1662 unlock_new_inode(inode);
1663 }
1664
1665 if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
1666 /* either due to i/o error or a stale NFS handle */
1667 iput(inode);
1668 inode = NULL;
1669 }
1670 return inode;
1671 }
1672
1673 static struct dentry *reiserfs_get_dentry(struct super_block *sb,
1674 u32 objectid, u32 dir_id, u32 generation)
1675
1676 {
1677 struct cpu_key key;
1678 struct inode *inode;
1679
1680 key.on_disk_key.k_objectid = objectid;
1681 key.on_disk_key.k_dir_id = dir_id;
1682 reiserfs_write_lock(sb);
1683 inode = reiserfs_iget(sb, &key);
1684 if (inode && !IS_ERR(inode) && generation != 0 &&
1685 generation != inode->i_generation) {
1686 iput(inode);
1687 inode = NULL;
1688 }
1689 reiserfs_write_unlock(sb);
1690
1691 return d_obtain_alias(inode);
1692 }
1693
1694 struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1695 int fh_len, int fh_type)
1696 {
1697 /*
1698 * fhtype happens to reflect the number of u32s encoded.
1699 * Due to a bug in earlier code, fhtype might indicate there
1700 * are more u32s than were actually stored,
1701 * so if fhtype seems to be more than len, reduce fhtype.
1702 * Valid types are:
1703 * 2 - objectid + dir_id - legacy support
1704 * 3 - objectid + dir_id + generation
1705 * 4 - objectid + dir_id + objectid and dirid of parent - legacy
1706 * 5 - objectid + dir_id + generation + objectid and dirid of parent
1707 * 6 - as above plus generation of directory
1708 * 6 does not fit in NFSv2 handles
1709 */
1710 if (fh_type > fh_len) {
1711 if (fh_type != 6 || fh_len != 5)
1712 reiserfs_warning(sb, "reiserfs-13077",
1713 "nfsd/reiserfs, fhtype=%d, len=%d - odd",
1714 fh_type, fh_len);
1715 fh_type = fh_len;
1716 }
1717 if (fh_len < 2)
1718 return NULL;
1719
1720 return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1721 (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1722 }
1723
1724 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1725 int fh_len, int fh_type)
1726 {
1727 if (fh_type > fh_len)
1728 fh_type = fh_len;
1729 if (fh_type < 4)
1730 return NULL;
1731
1732 return reiserfs_get_dentry(sb,
1733 (fh_type >= 5) ? fid->raw[3] : fid->raw[2],
1734 (fh_type >= 5) ? fid->raw[4] : fid->raw[3],
1735 (fh_type == 6) ? fid->raw[5] : 0);
1736 }
1737
1738 int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1739 struct inode *parent)
1740 {
1741 int maxlen = *lenp;
1742
1743 if (parent && (maxlen < 5)) {
1744 *lenp = 5;
1745 return FILEID_INVALID;
1746 } else if (maxlen < 3) {
1747 *lenp = 3;
1748 return FILEID_INVALID;
1749 }
1750
1751 data[0] = inode->i_ino;
1752 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1753 data[2] = inode->i_generation;
1754 *lenp = 3;
1755 if (parent) {
1756 data[3] = parent->i_ino;
1757 data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
1758 *lenp = 5;
1759 if (maxlen >= 6) {
1760 data[5] = parent->i_generation;
1761 *lenp = 6;
1762 }
1763 }
1764 return *lenp;
1765 }
1766
1767 /*
1768 * looks for stat data, then copies fields to it, marks the buffer
1769 * containing stat data as dirty
1770 */
1771 /*
1772 * reiserfs inodes are never really dirty, since the dirty inode call
1773 * always logs them. This call allows the VFS inode marking routines
1774 * to properly mark inodes for datasync and such, but only actually
1775 * does something when called for a synchronous update.
1776 */
1777 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1778 {
1779 struct reiserfs_transaction_handle th;
1780 int jbegin_count = 1;
1781
1782 if (inode->i_sb->s_flags & MS_RDONLY)
1783 return -EROFS;
1784 /*
1785 * memory pressure can sometimes initiate write_inode calls with
1786 * sync == 1. These cases occur just when the system needs RAM,
1787 * not when the inode needs to reach disk for safety, and they
1788 * can safely be ignored because the altered inode has already
1789 * been logged.
1790 */
1791 if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
1792 reiserfs_write_lock(inode->i_sb);
1793 if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1794 reiserfs_update_sd(&th, inode);
1795 journal_end_sync(&th);
1796 }
1797 reiserfs_write_unlock(inode->i_sb);
1798 }
1799 return 0;
1800 }
1801
1802 /*
1803 * stat data of new object is inserted already, this inserts the item
1804 * containing "." and ".." entries
1805 */
1806 static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
1807 struct inode *inode,
1808 struct item_head *ih, struct treepath *path,
1809 struct inode *dir)
1810 {
1811 struct super_block *sb = th->t_super;
1812 char empty_dir[EMPTY_DIR_SIZE];
1813 char *body = empty_dir;
1814 struct cpu_key key;
1815 int retval;
1816
1817 BUG_ON(!th->t_trans_id);
1818
1819 _make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
1820 le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
1821 TYPE_DIRENTRY, 3 /*key length */ );
1822
1823 /*
1824 * compose item head for new item. Directories consist of items of
1825 * old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
1826 * is done by reiserfs_new_inode
1827 */
1828 if (old_format_only(sb)) {
1829 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1830 TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
1831
1832 make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
1833 ih->ih_key.k_objectid,
1834 INODE_PKEY(dir)->k_dir_id,
1835 INODE_PKEY(dir)->k_objectid);
1836 } else {
1837 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1838 TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
1839
1840 make_empty_dir_item(body, ih->ih_key.k_dir_id,
1841 ih->ih_key.k_objectid,
1842 INODE_PKEY(dir)->k_dir_id,
1843 INODE_PKEY(dir)->k_objectid);
1844 }
1845
1846 /* look for place in the tree for new item */
1847 retval = search_item(sb, &key, path);
1848 if (retval == IO_ERROR) {
1849 reiserfs_error(sb, "vs-13080",
1850 "i/o failure occurred creating new directory");
1851 return -EIO;
1852 }
1853 if (retval == ITEM_FOUND) {
1854 pathrelse(path);
1855 reiserfs_warning(sb, "vs-13070",
1856 "object with this key exists (%k)",
1857 &(ih->ih_key));
1858 return -EEXIST;
1859 }
1860
1861 /* insert item, that is empty directory item */
1862 return reiserfs_insert_item(th, path, &key, ih, inode, body);
1863 }
1864
1865 /*
1866 * stat data of object has been inserted, this inserts the item
1867 * containing the body of symlink
1868 */
1869 static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
1870 struct inode *inode,
1871 struct item_head *ih,
1872 struct treepath *path, const char *symname,
1873 int item_len)
1874 {
1875 struct super_block *sb = th->t_super;
1876 struct cpu_key key;
1877 int retval;
1878
1879 BUG_ON(!th->t_trans_id);
1880
1881 _make_cpu_key(&key, KEY_FORMAT_3_5,
1882 le32_to_cpu(ih->ih_key.k_dir_id),
1883 le32_to_cpu(ih->ih_key.k_objectid),
1884 1, TYPE_DIRECT, 3 /*key length */ );
1885
1886 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
1887 0 /*free_space */ );
1888
1889 /* look for place in the tree for new item */
1890 retval = search_item(sb, &key, path);
1891 if (retval == IO_ERROR) {
1892 reiserfs_error(sb, "vs-13080",
1893 "i/o failure occurred creating new symlink");
1894 return -EIO;
1895 }
1896 if (retval == ITEM_FOUND) {
1897 pathrelse(path);
1898 reiserfs_warning(sb, "vs-13080",
1899 "object with this key exists (%k)",
1900 &(ih->ih_key));
1901 return -EEXIST;
1902 }
1903
1904 /* insert the item, that is, the body of the symlink */
1905 return reiserfs_insert_item(th, path, &key, ih, inode, symname);
1906 }
1907
1908 /*
1909 * inserts the stat data into the tree, and then calls
1910 * reiserfs_new_directory (to insert ".", ".." item if new object is
1911 * directory) or reiserfs_new_symlink (to insert symlink body if new
1912 * object is symlink) or nothing (if new object is regular file)
1913 *
1914 * NOTE! uid and gid must already be set in the inode. If we return
1915 * non-zero due to an error, we have to drop the quota previously allocated
1916 * for the fresh inode. This can only be done outside a transaction, so
1917 * if we return non-zero, we also end the transaction.
1918 *
1919 * @th: active transaction handle
1920 * @dir: parent directory for new inode
1921 * @mode: mode of new inode
1922 * @symname: symlink contents if inode is symlink
1923 * @isize: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
1924 * symlinks
1925 * @inode: inode to be filled
1926 * @security: optional security context to associate with this inode
1927 */
1928 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1929 struct inode *dir, umode_t mode, const char *symname,
1930 /* 0 for regular, EMPTY_DIR_SIZE for dirs,
1931 strlen(symname) for symlinks */
1932 loff_t i_size, struct dentry *dentry,
1933 struct inode *inode,
1934 struct reiserfs_security_handle *security)
1935 {
1936 struct super_block *sb = dir->i_sb;
1937 struct reiserfs_iget_args args;
1938 INITIALIZE_PATH(path_to_key);
1939 struct cpu_key key;
1940 struct item_head ih;
1941 struct stat_data sd;
1942 int retval;
1943 int err;
1944 int depth;
1945
1946 BUG_ON(!th->t_trans_id);
1947
1948 depth = reiserfs_write_unlock_nested(sb);
1949 err = dquot_alloc_inode(inode);
1950 reiserfs_write_lock_nested(sb, depth);
1951 if (err)
1952 goto out_end_trans;
1953 if (!dir->i_nlink) {
1954 err = -EPERM;
1955 goto out_bad_inode;
1956 }
1957
1958 /* item head of new item */
1959 ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
1960 ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
1961 if (!ih.ih_key.k_objectid) {
1962 err = -ENOMEM;
1963 goto out_bad_inode;
1964 }
1965 args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1966 if (old_format_only(sb))
1967 make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
1968 TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
1969 else
1970 make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
1971 TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
1972 memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
1973 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1974
1975 depth = reiserfs_write_unlock_nested(inode->i_sb);
1976 err = insert_inode_locked4(inode, args.objectid,
1977 reiserfs_find_actor, &args);
1978 reiserfs_write_lock_nested(inode->i_sb, depth);
1979 if (err) {
1980 err = -EINVAL;
1981 goto out_bad_inode;
1982 }
1983
1984 if (old_format_only(sb))
1985 /*
1986 * not a perfect generation count, as object ids can be reused,
1987 * but this is as good as reiserfs can do right now.
1988 * note that the private part of inode isn't filled in yet,
1989 * we have to use the directory.
1990 */
1991 inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
1992 else
1993 #if defined( USE_INODE_GENERATION_COUNTER )
1994 inode->i_generation =
1995 le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
1996 #else
1997 inode->i_generation = ++event;
1998 #endif
1999
2000 /* fill stat data */
2001 set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));
2002
2003 /* uid and gid must already be set by the caller for quota init */
2004
2005 /* symlink cannot be immutable or append only, right? */
2006 if (S_ISLNK(inode->i_mode))
2007 inode->i_flags &= ~(S_IMMUTABLE | S_APPEND);
2008
2009 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
2010 inode->i_size = i_size;
2011 inode->i_blocks = 0;
2012 inode->i_bytes = 0;
2013 REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
2014 U32_MAX /*NO_BYTES_IN_DIRECT_ITEM */ ;
2015
2016 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
2017 REISERFS_I(inode)->i_flags = 0;
2018 REISERFS_I(inode)->i_prealloc_block = 0;
2019 REISERFS_I(inode)->i_prealloc_count = 0;
2020 REISERFS_I(inode)->i_trans_id = 0;
2021 REISERFS_I(inode)->i_jl = NULL;
2022 REISERFS_I(inode)->i_attrs =
2023 REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
2024 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
2025 reiserfs_init_xattr_rwsem(inode);
2026
2027 /* key to search for correct place for new stat data */
2028 _make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
2029 le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
2030 TYPE_STAT_DATA, 3 /*key length */ );
2031
2032 /* find proper place for inserting of stat data */
2033 retval = search_item(sb, &key, &path_to_key);
2034 if (retval == IO_ERROR) {
2035 err = -EIO;
2036 goto out_bad_inode;
2037 }
2038 if (retval == ITEM_FOUND) {
2039 pathrelse(&path_to_key);
2040 err = -EEXIST;
2041 goto out_bad_inode;
2042 }
2043 if (old_format_only(sb)) {
2044 /* i_uid or i_gid is too big to be stored in stat data v3.5 */
2045 if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
2046 pathrelse(&path_to_key);
2047 err = -EINVAL;
2048 goto out_bad_inode;
2049 }
2050 inode2sd_v1(&sd, inode, inode->i_size);
2051 } else {
2052 inode2sd(&sd, inode, inode->i_size);
2053 }
2054 /*
2055 * store in the in-core inode the stat data key and the version that all
2056 * object items will have (directory items will use the old offset
2057 * format, other new objects will consist of new-format items)
2058 */
2059 if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
2060 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
2061 else
2062 set_inode_item_key_version(inode, KEY_FORMAT_3_6);
2063 if (old_format_only(sb))
2064 set_inode_sd_version(inode, STAT_DATA_V1);
2065 else
2066 set_inode_sd_version(inode, STAT_DATA_V2);
2067
2068 /* insert the stat data into the tree */
2069 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2070 if (REISERFS_I(dir)->new_packing_locality)
2071 th->displace_new_blocks = 1;
2072 #endif
2073 retval =
2074 reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
2075 (char *)(&sd));
2076 if (retval) {
2077 err = retval;
2078 reiserfs_check_path(&path_to_key);
2079 goto out_bad_inode;
2080 }
2081 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2082 if (!th->displace_new_blocks)
2083 REISERFS_I(dir)->new_packing_locality = 0;
2084 #endif
2085 if (S_ISDIR(mode)) {
2086 /* insert item with "." and ".." */
2087 retval =
2088 reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
2089 }
2090
2091 if (S_ISLNK(mode)) {
2092 /* insert body of symlink */
2093 if (!old_format_only(sb))
2094 i_size = ROUND_UP(i_size);
2095 retval =
2096 reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
2097 i_size);
2098 }
2099 if (retval) {
2100 err = retval;
2101 reiserfs_check_path(&path_to_key);
2102 journal_end(th);
2103 goto out_inserted_sd;
2104 }
2105
2106 if (reiserfs_posixacl(inode->i_sb)) {
2107 reiserfs_write_unlock(inode->i_sb);
2108 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
2109 reiserfs_write_lock(inode->i_sb);
2110 if (retval) {
2111 err = retval;
2112 reiserfs_check_path(&path_to_key);
2113 journal_end(th);
2114 goto out_inserted_sd;
2115 }
2116 } else if (inode->i_sb->s_flags & MS_POSIXACL) {
2117 reiserfs_warning(inode->i_sb, "jdm-13090",
2118 "ACLs aren't enabled in the fs, "
2119 "but vfs thinks they are!");
2120 } else if (IS_PRIVATE(dir))
2121 inode->i_flags |= S_PRIVATE;
2122
2123 if (security->name) {
2124 reiserfs_write_unlock(inode->i_sb);
2125 retval = reiserfs_security_write(th, inode, security);
2126 reiserfs_write_lock(inode->i_sb);
2127 if (retval) {
2128 err = retval;
2129 reiserfs_check_path(&path_to_key);
2130 retval = journal_end(th);
2131 if (retval)
2132 err = retval;
2133 goto out_inserted_sd;
2134 }
2135 }
2136
2137 reiserfs_update_sd(th, inode);
2138 reiserfs_check_path(&path_to_key);
2139
2140 return 0;
2141
2142 out_bad_inode:
2143 /* Invalidate the object, nothing was inserted yet */
2144 INODE_PKEY(inode)->k_objectid = 0;
2145
2146 /* Quota change must be inside a transaction for journaling */
2147 depth = reiserfs_write_unlock_nested(inode->i_sb);
2148 dquot_free_inode(inode);
2149 reiserfs_write_lock_nested(inode->i_sb, depth);
2150
2151 out_end_trans:
2152 journal_end(th);
2153 /*
2154 * The quota drop can be done outside a transaction and needs more
2155 * credits, so it is better to do it outside
2156 */
2157 depth = reiserfs_write_unlock_nested(inode->i_sb);
2158 dquot_drop(inode);
2159 reiserfs_write_lock_nested(inode->i_sb, depth);
2160 inode->i_flags |= S_NOQUOTA;
2161 make_bad_inode(inode);
2162
2163 out_inserted_sd:
2164 clear_nlink(inode);
2165 th->t_trans_id = 0; /* so the caller can't use this handle later */
2166 unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
2167 iput(inode);
2168 return err;
2169 }
2170
2171 /*
2172 * finds the tail page in the page cache,
2173 * reads the last block in.
2174 *
2175 * On success, page_result is set to a locked, pinned page, and bh_result
2176 * is set to an up-to-date buffer for the last block in the file. Returns 0.
2177 *
2178 * tail conversion is not done, so bh_result might not be valid for writing;
2179 * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
2180 * trying to write the block.
2181 *
2182 * on failure, nonzero is returned, page_result and bh_result are untouched.
2183 */
2184 static int grab_tail_page(struct inode *inode,
2185 struct page **page_result,
2186 struct buffer_head **bh_result)
2187 {
2188
2189 /*
2190 * we want the page with the last byte in the file,
2191 * not the page that will hold the next byte for appending
2192 */
2193 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2194 unsigned long pos = 0;
2195 unsigned long start = 0;
2196 unsigned long blocksize = inode->i_sb->s_blocksize;
2197 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2198 struct buffer_head *bh;
2199 struct buffer_head *head;
2200 struct page *page;
2201 int error;
2202
2203 /*
2204 * we know that we are only called with inode->i_size > 0.
2205 * we also know that a file tail can never be as big as a block
2206 * If i_size % blocksize == 0, our file is currently block aligned
2207 * and it won't need converting or zeroing after a truncate.
2208 */
2209 if ((offset & (blocksize - 1)) == 0) {
2210 return -ENOENT;
2211 }
2212 page = grab_cache_page(inode->i_mapping, index);
2213 error = -ENOMEM;
2214 if (!page) {
2215 goto out;
2216 }
2217 /* start within the page of the last block in the file */
2218 start = (offset / blocksize) * blocksize;
2219
2220 error = __block_write_begin(page, start, offset - start,
2221 reiserfs_get_block_create_0);
2222 if (error)
2223 goto unlock;
2224
2225 head = page_buffers(page);
2226 bh = head;
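/* walk the buffer list to the buffer that covers offset "start" in the page */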
2227 do {
2228 if (pos >= start) {
2229 break;
2230 }
2231 bh = bh->b_this_page;
2232 pos += blocksize;
2233 } while (bh != head);
2234
2235 if (!buffer_uptodate(bh)) {
2236 /*
2237 * note, this should never happen, prepare_write should be
2238 * taking care of this for us. If the buffer isn't up to
2239 * date, I've screwed up the code to find the buffer, or the
2240 * code to call prepare_write
2241 */
2242 reiserfs_error(inode->i_sb, "clm-6000",
2243 "error reading block %lu", bh->b_blocknr);
2244 error = -EIO;
2245 goto unlock;
2246 }
2247 *bh_result = bh;
2248 *page_result = page;
2249
2250 out:
2251 return error;
2252
2253 unlock:
2254 unlock_page(page);
2255 put_page(page);
2256 return error;
2257 }
2258
2259 /*
2260 * vfs version of truncate file. Must NOT be called with
2261 * a transaction already started.
2262 *
2263 * some code taken from block_truncate_page
2264 */
2265 int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2266 {
2267 struct reiserfs_transaction_handle th;
2268 /* we want the offset for the first byte after the end of the file */
2269 unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2270 unsigned blocksize = inode->i_sb->s_blocksize;
2271 unsigned length;
2272 struct page *page = NULL;
2273 int error;
2274 struct buffer_head *bh = NULL;
2275 int err2;
2276
2277 reiserfs_write_lock(inode->i_sb);
2278
2279 if (inode->i_size > 0) {
2280 error = grab_tail_page(inode, &page, &bh);
2281 if (error) {
2282 /*
2283 * -ENOENT means we truncated past the end of the
2284 * file, and get_block_create_0 could not find a
2285 * block to read in, which is ok.
2286 */
2287 if (error != -ENOENT)
2288 reiserfs_error(inode->i_sb, "clm-6001",
2289 "grab_tail_page failed %d",
2290 error);
2291 page = NULL;
2292 bh = NULL;
2293 }
2294 }
2295
2296 /*
2297 * so, if page != NULL, we have a buffer head for the offset at
2298 * the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
2299 * then we have an unformatted node. Otherwise, we have a direct item,
2300 * and no zeroing is required on disk. We zero after the truncate,
2301 * because the truncate might pack the item anyway
2302 * (it will unmap bh if it packs).
2303 *
2304 * it is enough to reserve space in transaction for 2 balancings:
2305 * one for "save" link adding and another for the first
2306 * cut_from_item. 1 is for update_sd
2307 */
2308 error = journal_begin(&th, inode->i_sb,
2309 JOURNAL_PER_BALANCE_CNT * 2 + 1);
2310 if (error)
2311 goto out;
2312 reiserfs_update_inode_transaction(inode);
2313 if (update_timestamps)
2314 /*
2315 * we are doing real truncate: if the system crashes
2316 * before the last transaction of truncating gets committed
2317 * - on reboot the file either appears truncated properly
2318 * or not truncated at all
2319 */
2320 add_save_link(&th, inode, 1);
2321 err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
2322 error = journal_end(&th);
2323 if (error)
2324 goto out;
2325
2326 /* check reiserfs_do_truncate after ending the transaction */
2327 if (err2) {
2328 error = err2;
2329 goto out;
2330 }
2331
2332 if (update_timestamps) {
2333 error = remove_save_link(inode, 1 /* truncate */);
2334 if (error)
2335 goto out;
2336 }
2337
2338 if (page) {
2339 length = offset & (blocksize - 1);
2340 /* if we are not on a block boundary */
2341 if (length) {
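/* zero from the new end of file to the end of its block */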
2342 length = blocksize - length;
2343 zero_user(page, offset, length);
2344 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2345 mark_buffer_dirty(bh);
2346 }
2347 }
2348 unlock_page(page);
2349 put_page(page);
2350 }
2351
2352 reiserfs_write_unlock(inode->i_sb);
2353
2354 return 0;
2355 out:
2356 if (page) {
2357 unlock_page(page);
2358 put_page(page);
2359 }
2360
2361 reiserfs_write_unlock(inode->i_sb);
2362
2363 return error;
2364 }
2365
2366 static int map_block_for_writepage(struct inode *inode,
2367 struct buffer_head *bh_result,
2368 unsigned long block)
2369 {
2370 struct reiserfs_transaction_handle th;
2371 int fs_gen;
2372 struct item_head tmp_ih;
2373 struct item_head *ih;
2374 struct buffer_head *bh;
2375 __le32 *item;
2376 struct cpu_key key;
2377 INITIALIZE_PATH(path);
2378 int pos_in_item;
2379 int jbegin_count = JOURNAL_PER_BALANCE_CNT;
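/* reiserfs item key offsets count file bytes starting from 1, hence the "+1" below */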
2380 loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
2381 int retval;
2382 int use_get_block = 0;
2383 int bytes_copied = 0;
2384 int copy_size;
2385 int trans_running = 0;
2386
2387 /*
2388 * catch places below that try to log something without
2389 * starting a trans
2390 */
2391 th.t_trans_id = 0;
2392
2393 if (!buffer_uptodate(bh_result)) {
2394 return -EIO;
2395 }
2396
2397 kmap(bh_result->b_page);
2398 start_over:
2399 reiserfs_write_lock(inode->i_sb);
2400 make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
2401
2402 research:
2403 retval = search_for_position_by_key(inode->i_sb, &key, &path);
2404 if (retval != POSITION_FOUND) {
2405 use_get_block = 1;
2406 goto out;
2407 }
2408
2409 bh = get_last_bh(&path);
2410 ih = tp_item_head(&path);
2411 item = tp_item_body(&path);
2412 pos_in_item = path.pos_in_item;
2413
2414 /* we've found an unformatted node */
2415 if (indirect_item_found(retval, ih)) {
2416 if (bytes_copied > 0) {
2417 reiserfs_warning(inode->i_sb, "clm-6002",
2418 "bytes_copied %d", bytes_copied);
2419 }
2420 if (!get_block_num(item, pos_in_item)) {
2421 /* crap, we are writing to a hole */
2422 use_get_block = 1;
2423 goto out;
2424 }
2425 set_block_dev_mapped(bh_result,
2426 get_block_num(item, pos_in_item), inode);
2427 } else if (is_direct_le_ih(ih)) {
2428 char *p;
2429 p = page_address(bh_result->b_page);
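/* point p at this block's data within the page; it is the source of the copy into the direct item */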
2430 p += (byte_offset - 1) & (PAGE_SIZE - 1);
2431 copy_size = ih_item_len(ih) - pos_in_item;
2432
2433 fs_gen = get_generation(inode->i_sb);
2434 copy_item_head(&tmp_ih, ih);
2435
2436 if (!trans_running) {
2437 /* vs-3050 is gone, no need to drop the path */
2438 retval = journal_begin(&th, inode->i_sb, jbegin_count);
2439 if (retval)
2440 goto out;
2441 reiserfs_update_inode_transaction(inode);
2442 trans_running = 1;
2443 if (fs_changed(fs_gen, inode->i_sb)
2444 && item_moved(&tmp_ih, &path)) {
2445 reiserfs_restore_prepared_buffer(inode->i_sb,
2446 bh);
2447 goto research;
2448 }
2449 }
2450
2451 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
2452
2453 if (fs_changed(fs_gen, inode->i_sb)
2454 && item_moved(&tmp_ih, &path)) {
2455 reiserfs_restore_prepared_buffer(inode->i_sb, bh);
2456 goto research;
2457 }
2458
2459 memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
2460 copy_size);
2461
2462 journal_mark_dirty(&th, bh);
2463 bytes_copied += copy_size;
2464 set_block_dev_mapped(bh_result, 0, inode);
2465
2466 /* are there still bytes left? */
2467 if (bytes_copied < bh_result->b_size &&
2468 (byte_offset + bytes_copied) < inode->i_size) {
2469 set_cpu_key_k_offset(&key,
2470 cpu_key_k_offset(&key) +
2471 copy_size);
2472 goto research;
2473 }
2474 } else {
2475 reiserfs_warning(inode->i_sb, "clm-6003",
2476 "bad item inode %lu", inode->i_ino);
2477 retval = -EIO;
2478 goto out;
2479 }
2480 retval = 0;
2481
2482 out:
2483 pathrelse(&path);
2484 if (trans_running) {
2485 int err = journal_end(&th);
2486 if (err)
2487 retval = err;
2488 trans_running = 0;
2489 }
2490 reiserfs_write_unlock(inode->i_sb);
2491
2492 /* this is where we fill in holes in the file. */
2493 if (use_get_block) {
2494 retval = reiserfs_get_block(inode, block, bh_result,
2495 GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
2496 | GET_BLOCK_NO_DANGLE);
2497 if (!retval) {
2498 if (!buffer_mapped(bh_result)
2499 || bh_result->b_blocknr == 0) {
2500 /* get_block failed to find a mapped unformatted node. */
2501 use_get_block = 0;
2502 goto start_over;
2503 }
2504 }
2505 }
2506 kunmap(bh_result->b_page);
2507
2508 if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
2509 /*
2510 * we've copied data from the page into the direct item, so the
2511 * buffer in the page is now clean, mark it to reflect that.
2512 */
2513 lock_buffer(bh_result);
2514 clear_buffer_dirty(bh_result);
2515 unlock_buffer(bh_result);
2516 }
2517 return retval;
2518 }
2519
2520 /*
2521 * mason@suse.com: updated in 2.5.54 to follow the same general io
2522 * start/recovery path as __block_write_full_page, along with special
2523 * code to handle reiserfs tails.
2524 */
2525 static int reiserfs_write_full_page(struct page *page,
2526 struct writeback_control *wbc)
2527 {
2528 struct inode *inode = page->mapping->host;
2529 unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2530 int error = 0;
2531 unsigned long block;
2532 sector_t last_block;
2533 struct buffer_head *head, *bh;
2534 int partial = 0;
2535 int nr = 0;
2536 int checked = PageChecked(page);
2537 struct reiserfs_transaction_handle th;
2538 struct super_block *s = inode->i_sb;
2539 int bh_per_page = PAGE_SIZE / s->s_blocksize;
2540 th.t_trans_id = 0;
2541
2542 /* no logging allowed when nonblocking or from PF_MEMALLOC */
2543 if (checked && (current->flags & PF_MEMALLOC)) {
2544 redirty_page_for_writepage(wbc, page);
2545 unlock_page(page);
2546 return 0;
2547 }
2548
2549 /*
2550 * The page dirty bit is cleared before writepage is called, which
2551 * means we have to tell create_empty_buffers to make dirty buffers.
2552 * The page really should be up to date at this point, so tossing
2553 * in the BH_Uptodate is just a sanity check.
2554 */
2555 if (!page_has_buffers(page)) {
2556 create_empty_buffers(page, s->s_blocksize,
2557 (1 << BH_Dirty) | (1 << BH_Uptodate));
2558 }
2559 head = page_buffers(page);
2560
2561 /*
2562 * last page in the file, zero out any contents past the
2563 * last byte in the file
2564 */
2565 if (page->index >= end_index) {
2566 unsigned last_offset;
2567
2568 last_offset = inode->i_size & (PAGE_SIZE - 1);
2569 /* no file contents in this page */
2570 if (page->index >= end_index + 1 || !last_offset) {
2571 unlock_page(page);
2572 return 0;
2573 }
2574 zero_user_segment(page, last_offset, PAGE_SIZE);
2575 }
2576 bh = head;
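/* file block number of the first buffer on this page */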
2577 block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
2578 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2579 /* first map all the buffers, logging any direct items we find */
2580 do {
2581 if (block > last_block) {
2582 /*
2583 * This can happen when the block size is less than
2584 * the page size. The corresponding bytes in the page
2585 * were zero filled above
2586 */
2587 clear_buffer_dirty(bh);
2588 set_buffer_uptodate(bh);
2589 } else if ((checked || buffer_dirty(bh)) &&
2590 (!buffer_mapped(bh) || (buffer_mapped(bh)
2591 && bh->b_blocknr ==
2592 0))) {
2593 /*
2594 * not mapped yet, or it points to a direct item, search
2595 * the btree for the mapping info, and log any direct
2596 * items found
2597 */
2598 if ((error = map_block_for_writepage(inode, bh, block))) {
2599 goto fail;
2600 }
2601 }
2602 bh = bh->b_this_page;
2603 block++;
2604 } while (bh != head);
2605
2606 /*
2607 * we start the transaction after map_block_for_writepage,
2608 * because it can create holes in the file (an unbounded operation).
2609 * starting it here, we can make a reliable estimate for how many
2610 * blocks we're going to log
2611 */
2612 if (checked) {
2613 ClearPageChecked(page);
2614 reiserfs_write_lock(s);
2615 error = journal_begin(&th, s, bh_per_page + 1);
2616 if (error) {
2617 reiserfs_write_unlock(s);
2618 goto fail;
2619 }
2620 reiserfs_update_inode_transaction(inode);
2621 }
2622 /* now go through and lock any dirty buffers on the page */
2623 do {
2624 get_bh(bh);
2625 if (!buffer_mapped(bh))
2626 continue;
2627 if (buffer_mapped(bh) && bh->b_blocknr == 0)
2628 continue;
2629
2630 if (checked) {
2631 reiserfs_prepare_for_journal(s, bh, 1);
2632 journal_mark_dirty(&th, bh);
2633 continue;
2634 }
2635 /*
2636 * from this point on, we know the buffer is mapped to a
2637 * real block and not a direct item
2638 */
2639 if (wbc->sync_mode != WB_SYNC_NONE) {
2640 lock_buffer(bh);
2641 } else {
2642 if (!trylock_buffer(bh)) {
2643 redirty_page_for_writepage(wbc, page);
2644 continue;
2645 }
2646 }
2647 if (test_clear_buffer_dirty(bh)) {
2648 mark_buffer_async_write(bh);
2649 } else {
2650 unlock_buffer(bh);
2651 }
2652 } while ((bh = bh->b_this_page) != head);
2653
2654 if (checked) {
2655 error = journal_end(&th);
2656 reiserfs_write_unlock(s);
2657 if (error)
2658 goto fail;
2659 }
2660 BUG_ON(PageWriteback(page));
2661 set_page_writeback(page);
2662 unlock_page(page);
2663
2664 /*
2665 * since any buffer might be the only dirty buffer on the page,
2666 * the first submit_bh can bring the page out of writeback.
2667 * be careful with the buffers.
2668 */
2669 do {
2670 struct buffer_head *next = bh->b_this_page;
2671 if (buffer_async_write(bh)) {
2672 submit_bh(REQ_OP_WRITE, 0, bh);
2673 nr++;
2674 }
2675 put_bh(bh);
2676 bh = next;
2677 } while (bh != head);
2678
2679 error = 0;
2680 done:
2681 if (nr == 0) {
2682 /*
2683 * if this page only had a direct item, it is very possible for
2684 * no io to be required without there being an error. Or,
2685 * someone else could have locked them and sent them down the
2686 * pipe without locking the page
2687 */
2688 bh = head;
2689 do {
2690 if (!buffer_uptodate(bh)) {
2691 partial = 1;
2692 break;
2693 }
2694 bh = bh->b_this_page;
2695 } while (bh != head);
2696 if (!partial)
2697 SetPageUptodate(page);
2698 end_page_writeback(page);
2699 }
2700 return error;
2701
2702 fail:
2703 /*
2704 * catches various errors, we need to make sure any valid dirty blocks
2705 * get to the media. The page is currently locked and not marked for
2706 * writeback
2707 */
2708 ClearPageUptodate(page);
2709 bh = head;
2710 do {
2711 get_bh(bh);
2712 if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
2713 lock_buffer(bh);
2714 mark_buffer_async_write(bh);
2715 } else {
2716 /*
2717 * clear any dirty bits that might have come from
2718 * getting attached to a dirty page
2719 */
2720 clear_buffer_dirty(bh);
2721 }
2722 bh = bh->b_this_page;
2723 } while (bh != head);
2724 SetPageError(page);
2725 BUG_ON(PageWriteback(page));
2726 set_page_writeback(page);
2727 unlock_page(page);
2728 do {
2729 struct buffer_head *next = bh->b_this_page;
2730 if (buffer_async_write(bh)) {
2731 clear_buffer_dirty(bh);
2732 submit_bh(REQ_OP_WRITE, 0, bh);
2733 nr++;
2734 }
2735 put_bh(bh);
2736 bh = next;
2737 } while (bh != head);
2738 goto done;
2739 }
2740
2741 static int reiserfs_readpage(struct file *f, struct page *page)
2742 {
2743 return block_read_full_page(page, reiserfs_get_block);
2744 }
2745
2746 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
2747 {
2748 struct inode *inode = page->mapping->host;
2749 reiserfs_wait_on_write_block(inode->i_sb);
2750 return reiserfs_write_full_page(page, wbc);
2751 }
2752
2753 static void reiserfs_truncate_failed_write(struct inode *inode)
2754 {
2755 truncate_inode_pages(inode->i_mapping, inode->i_size);
2756 reiserfs_truncate_file(inode, 0);
2757 }
2758
2759 static int reiserfs_write_begin(struct file *file,
2760 struct address_space *mapping,
2761 loff_t pos, unsigned len, unsigned flags,
2762 struct page **pagep, void **fsdata)
2763 {
2764 struct inode *inode;
2765 struct page *page;
2766 pgoff_t index;
2767 int ret;
2768 int old_ref = 0;
2769
2770 inode = mapping->host;
2771 *fsdata = NULL;
2772 if (flags & AOP_FLAG_CONT_EXPAND &&
2773 (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
2774 pos ++;
2775 *fsdata = (void *)(unsigned long)flags;
2776 }
2777
2778 index = pos >> PAGE_SHIFT;
2779 page = grab_cache_page_write_begin(mapping, index, flags);
2780 if (!page)
2781 return -ENOMEM;
2782 *pagep = page;
2783
2784 reiserfs_wait_on_write_block(inode->i_sb);
2785 fix_tail_page_for_writing(page);
2786 if (reiserfs_transaction_running(inode->i_sb)) {
2787 struct reiserfs_transaction_handle *th;
2788 th = (struct reiserfs_transaction_handle *)current->
2789 journal_info;
2790 BUG_ON(!th->t_refcount);
2791 BUG_ON(!th->t_trans_id);
2792 old_ref = th->t_refcount;
2793 th->t_refcount++;
2794 }
2795 ret = __block_write_begin(page, pos, len, reiserfs_get_block);
2796 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2797 struct reiserfs_transaction_handle *th = current->journal_info;
2798 /*
2799 * this gets a little ugly. If reiserfs_get_block returned an
2800 * error and left a transaction running, we've got to close
2801 * it, and we've got to free handle if it was a persistent
2802 * transaction.
2803 *
2804 * But, if we had nested into an existing transaction, we need
2805 * to just drop the ref count on the handle.
2806 *
2807 * If old_ref == 0, the transaction is from reiserfs_get_block,
2808 * and it was a persistent trans. Otherwise, it was nested
2809 * above.
2810 */
2811 if (th->t_refcount > old_ref) {
2812 if (old_ref)
2813 th->t_refcount--;
2814 else {
2815 int err;
2816 reiserfs_write_lock(inode->i_sb);
2817 err = reiserfs_end_persistent_transaction(th);
2818 reiserfs_write_unlock(inode->i_sb);
2819 if (err)
2820 ret = err;
2821 }
2822 }
2823 }
2824 if (ret) {
2825 unlock_page(page);
2826 put_page(page);
2827 /* Truncate allocated blocks */
2828 reiserfs_truncate_failed_write(inode);
2829 }
2830 return ret;
2831 }
2832
2833 int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
2834 {
2835 struct inode *inode = page->mapping->host;
2836 int ret;
2837 int old_ref = 0;
2838 int depth;
2839
2840 depth = reiserfs_write_unlock_nested(inode->i_sb);
2841 reiserfs_wait_on_write_block(inode->i_sb);
2842 reiserfs_write_lock_nested(inode->i_sb, depth);
2843
2844 fix_tail_page_for_writing(page);
2845 if (reiserfs_transaction_running(inode->i_sb)) {
2846 struct reiserfs_transaction_handle *th;
2847 th = (struct reiserfs_transaction_handle *)current->
2848 journal_info;
2849 BUG_ON(!th->t_refcount);
2850 BUG_ON(!th->t_trans_id);
2851 old_ref = th->t_refcount;
2852 th->t_refcount++;
2853 }
2854
2855 ret = __block_write_begin(page, from, len, reiserfs_get_block);
2856 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2857 struct reiserfs_transaction_handle *th = current->journal_info;
2858 /*
2859 * this gets a little ugly. If reiserfs_get_block returned an
2860 * error and left a transacstion running, we've got to close
2861 * it, and we've got to free handle if it was a persistent
2862 * transaction.
2863 *
2864 * But, if we had nested into an existing transaction, we need
2865 * to just drop the ref count on the handle.
2866 *
2867 * If old_ref == 0, the transaction is from reiserfs_get_block,
2868 * and it was a persistent trans. Otherwise, it was nested
2869 * above.
2870 */
2871 if (th->t_refcount > old_ref) {
2872 if (old_ref)
2873 th->t_refcount--;
2874 else {
2875 int err;
2876 reiserfs_write_lock(inode->i_sb);
2877 err = reiserfs_end_persistent_transaction(th);
2878 reiserfs_write_unlock(inode->i_sb);
2879 if (err)
2880 ret = err;
2881 }
2882 }
2883 }
2884 return ret;
2885
2886 }
2887
2888 static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
2889 {
2890 return generic_block_bmap(as, block, reiserfs_bmap);
2891 }
2892
2893 static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2894 loff_t pos, unsigned len, unsigned copied,
2895 struct page *page, void *fsdata)
2896 {
2897 struct inode *inode = page->mapping->host;
2898 int ret = 0;
2899 int update_sd = 0;
2900 struct reiserfs_transaction_handle *th;
2901 unsigned start;
2902 bool locked = false;
2903
2904 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
2905 pos ++;
2906
2907 reiserfs_wait_on_write_block(inode->i_sb);
2908 if (reiserfs_transaction_running(inode->i_sb))
2909 th = current->journal_info;
2910 else
2911 th = NULL;
2912
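/* offset of the write within the page */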
2913 start = pos & (PAGE_SIZE - 1);
2914 if (unlikely(copied < len)) {
2915 if (!PageUptodate(page))
2916 copied = 0;
2917
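/* zero the uncopied tail of any buffers instantiated by write_begin */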
2918 page_zero_new_buffers(page, start + copied, start + len);
2919 }
2920 flush_dcache_page(page);
2921
2922 reiserfs_commit_page(inode, page, start, start + copied);
2923
2924 /*
2925 * generic_commit_write does this for us, but does not update the
2926 * transaction tracking stuff when the size changes. So, we have
2927 * to do the i_size updates here.
2928 */
2929 if (pos + copied > inode->i_size) {
2930 struct reiserfs_transaction_handle myth;
2931 reiserfs_write_lock(inode->i_sb);
2932 locked = true;
2933 /*
2934 * If the file has grown beyond the boundary where it
2935 * can have a tail, unmark it as needing a tail
2936 * packing
2937 */
2938 if ((have_large_tails(inode->i_sb)
2939 && inode->i_size > i_block_size(inode) * 4)
2940 || (have_small_tails(inode->i_sb)
2941 && inode->i_size > i_block_size(inode)))
2942 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2943
2944 ret = journal_begin(&myth, inode->i_sb, 1);
2945 if (ret)
2946 goto journal_error;
2947
2948 reiserfs_update_inode_transaction(inode);
2949 inode->i_size = pos + copied;
2950 /*
2951 * this will just nest into our transaction. It's important
2952 * to use mark_inode_dirty so the inode gets pushed around on
2953 * the dirty lists, and so that O_SYNC works as expected
2954 */
2955 mark_inode_dirty(inode);
2956 reiserfs_update_sd(&myth, inode);
2957 update_sd = 1;
2958 ret = journal_end(&myth);
2959 if (ret)
2960 goto journal_error;
2961 }
2962 if (th) {
2963 if (!locked) {
2964 reiserfs_write_lock(inode->i_sb);
2965 locked = true;
2966 }
2967 if (!update_sd)
2968 mark_inode_dirty(inode);
2969 ret = reiserfs_end_persistent_transaction(th);
2970 if (ret)
2971 goto out;
2972 }
2973
2974 out:
2975 if (locked)
2976 reiserfs_write_unlock(inode->i_sb);
2977 unlock_page(page);
2978 put_page(page);
2979
2980 if (pos + len > inode->i_size)
2981 reiserfs_truncate_failed_write(inode);
2982
2983 return ret == 0 ? copied : ret;
2984
2985 journal_error:
2986 reiserfs_write_unlock(inode->i_sb);
2987 locked = false;
2988 if (th) {
2989 if (!update_sd)
2990 reiserfs_update_sd(th, inode);
2991 ret = reiserfs_end_persistent_transaction(th);
2992 }
2993 goto out;
2994 }
2995
2996 int reiserfs_commit_write(struct file *f, struct page *page,
2997 unsigned from, unsigned to)
2998 {
2999 struct inode *inode = page->mapping->host;
3000 loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
3001 int ret = 0;
3002 int update_sd = 0;
3003 struct reiserfs_transaction_handle *th = NULL;
3004 int depth;
3005
3006 depth = reiserfs_write_unlock_nested(inode->i_sb);
3007 reiserfs_wait_on_write_block(inode->i_sb);
3008 reiserfs_write_lock_nested(inode->i_sb, depth);
3009
3010 if (reiserfs_transaction_running(inode->i_sb)) {
3011 th = current->journal_info;
3012 }
3013 reiserfs_commit_page(inode, page, from, to);
3014
3015 /*
3016 * generic_commit_write does this for us, but does not update the
3017 * transaction tracking stuff when the size changes. So, we have
3018 * to do the i_size updates here.
3019 */
3020 if (pos > inode->i_size) {
3021 struct reiserfs_transaction_handle myth;
3022 /*
3023 * If the file has grown beyond the boundary where it
3024 * can have a tail, unmark it as needing a tail
3025 * packing
3026 */
3027 if ((have_large_tails(inode->i_sb)
3028 && inode->i_size > i_block_size(inode) * 4)
3029 || (have_small_tails(inode->i_sb)
3030 && inode->i_size > i_block_size(inode)))
3031 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
3032
3033 ret = journal_begin(&myth, inode->i_sb, 1);
3034 if (ret)
3035 goto journal_error;
3036
3037 reiserfs_update_inode_transaction(inode);
3038 inode->i_size = pos;
3039 /*
3040 * this will just nest into our transaction. It's important
3041 * to use mark_inode_dirty so the inode gets pushed around
3042 * on the dirty lists, and so that O_SYNC works as expected
3043 */
3044 mark_inode_dirty(inode);
3045 reiserfs_update_sd(&myth, inode);
3046 update_sd = 1;
3047 ret = journal_end(&myth);
3048 if (ret)
3049 goto journal_error;
3050 }
3051 if (th) {
3052 if (!update_sd)
3053 mark_inode_dirty(inode);
3054 ret = reiserfs_end_persistent_transaction(th);
3055 if (ret)
3056 goto out;
3057 }
3058
3059 out:
3060 return ret;
3061
3062 journal_error:
3063 if (th) {
3064 if (!update_sd)
3065 reiserfs_update_sd(th, inode);
3066 ret = reiserfs_end_persistent_transaction(th);
3067 }
3068
3069 return ret;
3070 }
3071
3072 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
3073 {
3074 if (reiserfs_attrs(inode->i_sb)) {
3075 if (sd_attrs & REISERFS_SYNC_FL)
3076 inode->i_flags |= S_SYNC;
3077 else
3078 inode->i_flags &= ~S_SYNC;
3079 if (sd_attrs & REISERFS_IMMUTABLE_FL)
3080 inode->i_flags |= S_IMMUTABLE;
3081 else
3082 inode->i_flags &= ~S_IMMUTABLE;
3083 if (sd_attrs & REISERFS_APPEND_FL)
3084 inode->i_flags |= S_APPEND;
3085 else
3086 inode->i_flags &= ~S_APPEND;
3087 if (sd_attrs & REISERFS_NOATIME_FL)
3088 inode->i_flags |= S_NOATIME;
3089 else
3090 inode->i_flags &= ~S_NOATIME;
3091 if (sd_attrs & REISERFS_NOTAIL_FL)
3092 REISERFS_I(inode)->i_flags |= i_nopack_mask;
3093 else
3094 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
3095 }
3096 }
3097
3098 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs)
3099 {
3100 if (reiserfs_attrs(inode->i_sb)) {
3101 if (inode->i_flags & S_IMMUTABLE)
3102 *sd_attrs |= REISERFS_IMMUTABLE_FL;
3103 else
3104 *sd_attrs &= ~REISERFS_IMMUTABLE_FL;
3105 if (inode->i_flags & S_SYNC)
3106 *sd_attrs |= REISERFS_SYNC_FL;
3107 else
3108 *sd_attrs &= ~REISERFS_SYNC_FL;
3109 if (inode->i_flags & S_NOATIME)
3110 *sd_attrs |= REISERFS_NOATIME_FL;
3111 else
3112 *sd_attrs &= ~REISERFS_NOATIME_FL;
3113 if (REISERFS_I(inode)->i_flags & i_nopack_mask)
3114 *sd_attrs |= REISERFS_NOTAIL_FL;
3115 else
3116 *sd_attrs &= ~REISERFS_NOTAIL_FL;
3117 }
3118 }
3119
3120 /*
3121 * decide if this buffer needs to stay around for data logging or ordered
3122 * write purposes
3123 */
3124 static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
3125 {
3126 int ret = 1;
3127 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3128
3129 lock_buffer(bh);
3130 spin_lock(&j->j_dirty_buffers_lock);
3131 if (!buffer_mapped(bh)) {
3132 goto free_jh;
3133 }
3134 /*
3135 * the page is locked, and the only places that log a data buffer
3136 * also lock the page.
3137 */
3138 if (reiserfs_file_data_log(inode)) {
3139 /*
3140 * very conservative, leave the buffer pinned if
3141 * anyone might need it.
3142 */
3143 if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
3144 ret = 0;
3145 }
3146 } else if (buffer_dirty(bh)) {
3147 struct reiserfs_journal_list *jl;
3148 struct reiserfs_jh *jh = bh->b_private;
3149
3150 /*
3151 * why is this safe?
3152 * reiserfs_setattr updates i_size in the on disk
3153 * stat data before allowing vmtruncate to be called.
3154 *
3155 * If buffer was put onto the ordered list for this
3156 * transaction, we know for sure either this transaction
3157 * or an older one already has updated i_size on disk,
3158 * and this ordered data won't be referenced in the file
3159 * if we crash.
3160 *
3161 * if the buffer was put onto the ordered list for an older
3162 * transaction, we need to leave it around
3163 */
3164 if (jh && (jl = jh->jl)
3165 && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
3166 ret = 0;
3167 }
3168 free_jh:
3169 if (ret && bh->b_private) {
3170 reiserfs_free_jh(bh);
3171 }
3172 spin_unlock(&j->j_dirty_buffers_lock);
3173 unlock_buffer(bh);
3174 return ret;
3175 }
3176
3177 /* clm -- taken from fs/buffer.c:block_invalidate_page */
3178 static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3179 unsigned int length)
3180 {
3181 struct buffer_head *head, *bh, *next;
3182 struct inode *inode = page->mapping->host;
3183 unsigned int curr_off = 0;
3184 unsigned int stop = offset + length;
3185 int partial_page = (offset || length < PAGE_SIZE);
3186 int ret = 1;
3187
3188 BUG_ON(!PageLocked(page));
3189
3190 if (!partial_page)
3191 ClearPageChecked(page);
3192
3193 if (!page_has_buffers(page))
3194 goto out;
3195
3196 head = page_buffers(page);
3197 bh = head;
3198 do {
3199 unsigned int next_off = curr_off + bh->b_size;
3200 next = bh->b_this_page;
3201
3202 if (next_off > stop)
3203 goto out;
3204
3205 /*
3206 * is this block fully invalidated?
3207 */
3208 if (offset <= curr_off) {
3209 if (invalidatepage_can_drop(inode, bh))
3210 reiserfs_unmap_buffer(bh);
3211 else
3212 ret = 0;
3213 }
3214 curr_off = next_off;
3215 bh = next;
3216 } while (bh != head);
3217
3218 /*
3219 * We release buffers only if the entire page is being invalidated.
3220 * The get_block cached value has been unconditionally invalidated,
3221 * so real IO is not possible anymore.
3222 */
3223 if (!partial_page && ret) {
3224 ret = try_to_release_page(page, 0);
3225 /* maybe should BUG_ON(!ret); - neilb */
3226 }
3227 out:
3228 return;
3229 }
3230
3231 static int reiserfs_set_page_dirty(struct page *page)
3232 {
3233 struct inode *inode = page->mapping->host;
3234 if (reiserfs_file_data_log(inode)) {
3235 SetPageChecked(page);
3236 return __set_page_dirty_nobuffers(page);
3237 }
3238 return __set_page_dirty_buffers(page);
3239 }
3240
3241 /*
3242 * Returns 1 if the page's buffers were dropped. The page is locked.
3243 *
3244 * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
3245 * in the buffers at page_buffers(page).
3246 *
3247 * even in -o notail mode, we can't be sure an old mount without -o notail
3248 * didn't create files with tails.
3249 */
3250 static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3251 {
3252 struct inode *inode = page->mapping->host;
3253 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3254 struct buffer_head *head;
3255 struct buffer_head *bh;
3256 int ret = 1;
3257
3258 WARN_ON(PageChecked(page));
3259 spin_lock(&j->j_dirty_buffers_lock);
3260 head = page_buffers(page);
3261 bh = head;
3262 do {
3263 if (bh->b_private) {
3264 if (!buffer_dirty(bh) && !buffer_locked(bh)) {
3265 reiserfs_free_jh(bh);
3266 } else {
3267 ret = 0;
3268 break;
3269 }
3270 }
3271 bh = bh->b_this_page;
3272 } while (bh != head);
3273 if (ret)
3274 ret = try_to_free_buffers(page);
3275 spin_unlock(&j->j_dirty_buffers_lock);
3276 return ret;
3277 }
3278
3279 /*
3280 * We thank Mingming Cao for helping us understand in great detail what
3281 * to do in this section of the code.
3282 */
3283 static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3284 {
3285 struct file *file = iocb->ki_filp;
3286 struct inode *inode = file->f_mapping->host;
3287 size_t count = iov_iter_count(iter);
3288 ssize_t ret;
3289
3290 ret = blockdev_direct_IO(iocb, inode, iter,
3291 reiserfs_get_blocks_direct_io);
3292
3293 /*
3294 * In case of error extending write may have instantiated a few
3295 * blocks outside i_size. Trim these off again.
3296 */
3297 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
3298 loff_t isize = i_size_read(inode);
3299 loff_t end = iocb->ki_pos + count;
3300
3301 if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
3302 truncate_setsize(inode, isize);
3303 reiserfs_vfs_truncate_file(inode);
3304 }
3305 }
3306
3307 return ret;
3308 }
3309
3310 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3311 {
3312 struct inode *inode = d_inode(dentry);
3313 unsigned int ia_valid;
3314 int error;
3315
3316 error = setattr_prepare(dentry, attr);
3317 if (error)
3318 return error;
3319
3320 /* must be turned off for recursive notify_change calls */
3321 ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
3322
3323 if (is_quota_modification(inode, attr)) {
3324 error = dquot_initialize(inode);
3325 if (error)
3326 return error;
3327 }
3328 reiserfs_write_lock(inode->i_sb);
3329 if (attr->ia_valid & ATTR_SIZE) {
3330 /*
3331 * version 2 items will be caught by the s_maxbytes check
3332 * done for us in vmtruncate
3333 */
3334 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3335 attr->ia_size > MAX_NON_LFS) {
3336 reiserfs_write_unlock(inode->i_sb);
3337 error = -EFBIG;
3338 goto out;
3339 }
3340
3341 inode_dio_wait(inode);
3342
3343 /* fill in hole pointers in the expanding truncate case. */
3344 if (attr->ia_size > inode->i_size) {
3345 error = generic_cont_expand_simple(inode, attr->ia_size);
3346 if (REISERFS_I(inode)->i_prealloc_count > 0) {
3347 int err;
3348 struct reiserfs_transaction_handle th;
3349 /* we're changing at most 2 bitmaps, inode + super */
3350 err = journal_begin(&th, inode->i_sb, 4);
3351 if (!err) {
3352 reiserfs_discard_prealloc(&th, inode);
3353 err = journal_end(&th);
3354 }
3355 if (err)
3356 error = err;
3357 }
3358 if (error) {
3359 reiserfs_write_unlock(inode->i_sb);
3360 goto out;
3361 }
3362 /*
3363 * file size is changed, ctime and mtime are
3364 * to be updated
3365 */
3366 attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
3367 }
3368 }
3369 reiserfs_write_unlock(inode->i_sb);
3370
3371 if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
3372 ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
3373 (get_inode_sd_version(inode) == STAT_DATA_V1)) {
3374 /* stat data of format v3.5 has 16 bit uid and gid */
3375 error = -EINVAL;
3376 goto out;
3377 }
3378
3379 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3380 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3381 struct reiserfs_transaction_handle th;
3382 int jbegin_count =
3383 2 *
3384 (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
3385 REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
3386 2;
3387
3388 error = reiserfs_chown_xattrs(inode, attr);
3389
3390 if (error)
3391 return error;
3392
3393 /*
3394 * (user+group)*(old+new) structure - we count quota
3395 * info update and inode write (sb, inode)
3396 */
3397 reiserfs_write_lock(inode->i_sb);
3398 error = journal_begin(&th, inode->i_sb, jbegin_count);
3399 reiserfs_write_unlock(inode->i_sb);
3400 if (error)
3401 goto out;
3402 error = dquot_transfer(inode, attr);
3403 reiserfs_write_lock(inode->i_sb);
3404 if (error) {
3405 journal_end(&th);
3406 reiserfs_write_unlock(inode->i_sb);
3407 goto out;
3408 }
3409
3410 /*
3411 * Update corresponding info in inode so that everything
3412 * is in one transaction
3413 */
3414 if (attr->ia_valid & ATTR_UID)
3415 inode->i_uid = attr->ia_uid;
3416 if (attr->ia_valid & ATTR_GID)
3417 inode->i_gid = attr->ia_gid;
3418 mark_inode_dirty(inode);
3419 error = journal_end(&th);
3420 reiserfs_write_unlock(inode->i_sb);
3421 if (error)
3422 goto out;
3423 }
3424
3425 if ((attr->ia_valid & ATTR_SIZE) &&
3426 attr->ia_size != i_size_read(inode)) {
3427 error = inode_newsize_ok(inode, attr->ia_size);
3428 if (!error) {
3429 /*
3430 * Could race against reiserfs_file_release
3431 * if called from NFS, so take tailpack mutex.
3432 */
3433 mutex_lock(&REISERFS_I(inode)->tailpack);
3434 truncate_setsize(inode, attr->ia_size);
3435 reiserfs_truncate_file(inode, 1);
3436 mutex_unlock(&REISERFS_I(inode)->tailpack);
3437 }
3438 }
3439
3440 if (!error) {
3441 setattr_copy(inode, attr);
3442 mark_inode_dirty(inode);
3443 }
3444
3445 if (!error && reiserfs_posixacl(inode->i_sb)) {
3446 if (attr->ia_valid & ATTR_MODE)
3447 error = reiserfs_acl_chmod(inode);
3448 }
3449
3450 out:
3451 return error;
3452 }
3453
3454 const struct address_space_operations reiserfs_address_space_operations = {
3455 .writepage = reiserfs_writepage,
3456 .readpage = reiserfs_readpage,
3457 .readpages = reiserfs_readpages,
3458 .releasepage = reiserfs_releasepage,
3459 .invalidatepage = reiserfs_invalidatepage,
3460 .write_begin = reiserfs_write_begin,
3461 .write_end = reiserfs_write_end,
3462 .bmap = reiserfs_aop_bmap,
3463 .direct_IO = reiserfs_direct_IO,
3464 .set_page_dirty = reiserfs_set_page_dirty,
3465 };