/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/fs.h>
#include "reiserfs.h"
#include "acl.h"
#include "xattr.h"
#include <linux/exportfs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/swap.h>
#include <linux/uio.h>

int reiserfs_commit_write(struct file *f, struct page *page,
			  unsigned from, unsigned to);

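/*
 * Called when the VFS evicts the in-core inode.  If the link count is
 * zero, all items of the file, its stat data and its quota charge are
 * deleted inside a single transaction, and the "save" link written by
 * the unlink path is removed afterwards.
 */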
void reiserfs_evict_inode(struct inode *inode)
{
	/*
	 * We need blocks for transaction + (user+group) quota
	 * update (possibly delete)
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 2 +
	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
	struct reiserfs_transaction_handle th;
	int err;

	if (!inode->i_nlink && !is_bad_inode(inode))
		dquot_initialize(inode);

	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_nlink)
		goto no_delete;

	/*
	 * An objectid of 0 happens when we abort creating a new inode
	 * for some reason, such as lack of space.
	 * This also handles the bad_inode case.
	 */
	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {

		reiserfs_delete_xattrs(inode);

		reiserfs_write_lock(inode->i_sb);

		if (journal_begin(&th, inode->i_sb, jbegin_count))
			goto out;
		reiserfs_update_inode_transaction(inode);

		reiserfs_discard_prealloc(&th, inode);

		err = reiserfs_delete_object(&th, inode);

		/*
		 * Do quota update inside a transaction for journaled quotas.
		 * We must do that after delete_object so that quota updates
		 * go into the same transaction as stat data deletion
		 */
		if (!err) {
			int depth = reiserfs_write_unlock_nested(inode->i_sb);
			dquot_free_inode(inode);
			reiserfs_write_lock_nested(inode->i_sb, depth);
		}

		if (journal_end(&th))
			goto out;

		/*
		 * check return value from reiserfs_delete_object after
		 * ending the transaction
		 */
		if (err)
			goto out;

		/*
		 * all items of the file are deleted, so we can remove the
		 * "save" link.
		 * we can't do anything about an error here
		 */
		remove_save_link(inode, 0 /* not truncate */);
out:
		reiserfs_write_unlock(inode->i_sb);
	} else {
		/* no object items are in the tree */
		;
	}

	/* note this must go after the journal_end to prevent deadlock */
	clear_inode(inode);

	dquot_drop(inode);
	inode->i_blocks = 0;
	return;

no_delete:
	clear_inode(inode);
	dquot_drop(inode);
}

static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
			  __u32 objectid, loff_t offset, int type, int length)
{
	key->version = version;

	key->on_disk_key.k_dir_id = dirid;
	key->on_disk_key.k_objectid = objectid;
	set_cpu_key_k_offset(key, offset);
	set_cpu_key_k_type(key, type);
	key->key_length = length;
}

/*
 * take the base of inode_key (it comes from the inode always: dirid,
 * objectid) and the version from an inode, set offset and type of key
 */
void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
		  int type, int length)
{
	_make_cpu_key(key, get_inode_item_key_version(inode),
		      le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
		      le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
		      length);
}

/* when key is 0, do not set version and short key */
inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
			      int version,
			      loff_t offset, int type, int length,
			      int entry_count /*or ih_free_space */ )
{
	if (key) {
		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
		ih->ih_key.k_objectid =
		    cpu_to_le32(key->on_disk_key.k_objectid);
	}
	put_ih_version(ih, version);
	set_le_ih_k_offset(ih, offset);
	set_le_ih_k_type(ih, type);
	put_ih_item_len(ih, length);
	/* set_ih_free_space (ih, 0); */
	/*
	 * for directory items it is the entry count, for directs and stat
	 * datas - 0xffff, for indirects - 0
	 */
	put_ih_entry_count(ih, entry_count);
}

/*
 * FIXME: we might cache recently accessed indirect item
 * Ugh.  Not too eager for that....
 * I cut the code until such time as I see a convincing argument (benchmark).
 * I don't want a bloated inode struct..., and I don't like code complexity....
 */

/*
 * cutting the code is fine, since it really isn't in use yet and is easy
 * to add back in.  But, Vladimir has a really good idea here.  Think
 * about what happens for reading a file.  For each page,
 * the VFS layer calls reiserfs_readpage, which searches the tree to find
 * an indirect item.  This indirect item has X number of pointers, where
 * X is a big number if we've done the block allocation right.  But,
 * we only use one or two of these pointers during each call to readpage,
 * needlessly researching again later on.
 *
 * The size of the cache could be dynamic based on the size of the file.
 *
 * I'd also like to see us cache the location of the stat data item, since
 * we are needlessly researching for that frequently.
 *
 * --chris
 */

/*
 * If this page has a file tail in it, and
 * it was read in by get_block_create_0, the page data is valid,
 * but the tail is still sitting in a direct item, and we can't write to
 * it.  So, look through this page, and check all the mapped buffers
 * to make sure they have valid block numbers.  Any that don't have
 * valid block numbers need to be unmapped, so that __block_write_begin
 * will correctly call reiserfs_get_block to convert the tail into an
 * unformatted node.
 */
static inline void fix_tail_page_for_writing(struct page *page)
{
	struct buffer_head *head, *next, *bh;

	if (page && page_has_buffers(page)) {
		head = page_buffers(page);
		bh = head;
		do {
			next = bh->b_this_page;
			if (buffer_mapped(bh) && bh->b_blocknr == 0) {
				reiserfs_unmap_buffer(bh);
			}
			bh = next;
		} while (bh != head);
	}
}

/*
 * reiserfs_get_block does not need to allocate a block only if it has been
 * done already or a non-hole position has been found in the indirect item
 */
static inline int allocation_needed(int retval, b_blocknr_t allocated,
				    struct item_head *ih,
				    __le32 *item, int pos_in_item)
{
	if (allocated)
		return 0;
	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
	    get_block_num(item, pos_in_item))
		return 0;
	return 1;
}

static inline int indirect_item_found(int retval, struct item_head *ih)
{
	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
}

static inline void set_block_dev_mapped(struct buffer_head *bh,
					b_blocknr_t block, struct inode *inode)
{
	map_bh(bh, inode->i_sb, block);
}

/*
 * files created by the old (3.5) format cannot be larger than 2 GB
 */
static int file_capable(struct inode *inode, sector_t block)
{
	/* it is a new-format file */
	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
	    /* old file, but 'block' is inside of 2gb */
	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
		return 1;

	return 0;
}

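/*
 * End the current transaction and open a fresh one so the journal can
 * recycle log space.  The caller's path is released here.  A nested
 * handle (t_refcount > 1) cannot be restarted, so this is a no-op in
 * that case.
 */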
static int restart_transaction(struct reiserfs_transaction_handle *th,
			       struct inode *inode, struct treepath *path)
{
	struct super_block *s = th->t_super;
	int err;

	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_refcount);

	pathrelse(path);

	/* we cannot restart while nested */
	if (th->t_refcount > 1) {
		return 0;
	}
	reiserfs_update_sd(th, inode);
	err = journal_end(th);
	if (!err) {
		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
		if (!err)
			reiserfs_update_inode_transaction(inode);
	}
	return err;
}

/*
 * Called by reiserfs_get_block() when create == 0.  Maps the 'block'-th
 * logical block of the file into bh_result.  If the block turns out to
 * live in a direct item (a file tail), it either returns -ENOENT (when
 * called from bmap, which cannot map tails) or copies the direct item
 * into the relevant piece of the page behind bh_result.
 */
static int _get_block_create_0(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int args)
{
	INITIALIZE_PATH(path);
	struct cpu_key key;
	struct buffer_head *bh;
	struct item_head *ih, tmp_ih;
	b_blocknr_t blocknr;
	char *p = NULL;
	int chars;
	int ret;
	int result;
	int done = 0;
	unsigned long offset;

	/* prepare the key to look for the 'block'-th block of file */
	make_cpu_key(&key, inode,
		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
		     3);

	result = search_for_position_by_key(inode->i_sb, &key, &path);
	if (result != POSITION_FOUND) {
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		if (result == IO_ERROR)
			return -EIO;
		/*
		 * We do not return -ENOENT if there is a hole but page is
		 * uptodate, because it means that there is some MMAPED data
		 * associated with it that is yet to be written to disk.
		 */
		if ((args & GET_BLOCK_NO_HOLE)
		    && !PageUptodate(bh_result->b_page)) {
			return -ENOENT;
		}
		return 0;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	if (is_indirect_le_ih(ih)) {
		__le32 *ind_item = (__le32 *) ih_item_body(bh, ih);

		/*
		 * FIXME: here we could cache the indirect item or part of it
		 * in the inode to avoid search_by_key in case of subsequent
		 * access to the file
		 */
		blocknr = get_block_num(ind_item, path.pos_in_item);
		ret = 0;
		if (blocknr) {
			map_bh(bh_result, inode->i_sb, blocknr);
			if (path.pos_in_item ==
			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
				set_buffer_boundary(bh_result);
			}
		} else
			/*
			 * We do not return -ENOENT if there is a hole but
			 * page is uptodate, because it means that there is
			 * some MMAPED data associated with it that is
			 * yet to be written to disk.
			 */
			if ((args & GET_BLOCK_NO_HOLE)
			    && !PageUptodate(bh_result->b_page)) {
				ret = -ENOENT;
			}

		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return ret;
	}
	/* requested data are in direct item(s) */
	if (!(args & GET_BLOCK_READ_DIRECT)) {
		/*
		 * we are called by bmap.  FIXME: we can not map a block of
		 * the file when it is stored in direct item(s)
		 */
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return -ENOENT;
	}

	/*
	 * if we've got a direct item, and the buffer or page was uptodate,
	 * we don't want to pull data off disk again.  skip to the
	 * end, where we map the buffer and return
	 */
	if (buffer_uptodate(bh_result)) {
		goto finished;
	} else
		/*
		 * grab_tail_page can trigger calls to reiserfs_get_block on
		 * up to date pages without any buffers.  If the page is up
		 * to date, we don't want to read old data off disk.  Set the
		 * uptodate bit on the buffer instead and jump to the end
		 */
		if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
			set_buffer_uptodate(bh_result);
			goto finished;
		}
	/* read file tail into part of page */
	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
	copy_item_head(&tmp_ih, ih);

	/*
	 * we only want to kmap if we are reading the tail into the page.
	 * this is not the common case, so we don't kmap until we are
	 * sure we need to.  But, this means the item might move if
	 * kmap schedules
	 */
	if (!p)
		p = (char *)kmap(bh_result->b_page);

	p += offset;
	memset(p, 0, inode->i_sb->s_blocksize);
	do {
		if (!is_direct_le_ih(ih)) {
			BUG();
		}
		/*
		 * make sure we don't read more bytes than actually exist in
		 * the file.  This can happen in odd cases where i_size isn't
		 * correct, and when direct item padding results in a few
		 * extra bytes at the end of the direct item
		 */
		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
			break;
		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
			chars =
			    inode->i_size - (le_ih_k_offset(ih) - 1) -
			    path.pos_in_item;
			done = 1;
		} else {
			chars = ih_item_len(ih) - path.pos_in_item;
		}
		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);

		if (done)
			break;

		p += chars;

		/*
		 * we are done if the direct item we just read is not the
		 * last item of the node.  FIXME: we could check the right
		 * delimiting key to see whether the direct item continues
		 * in the right neighbor, or rely on i_size
		 */
		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
			break;

		/* update key to look for the next piece */
		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
		result = search_for_position_by_key(inode->i_sb, &key, &path);
		if (result != POSITION_FOUND)
			/* i/o error most likely */
			break;
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
	} while (1);

	flush_dcache_page(bh_result->b_page);
	kunmap(bh_result->b_page);

finished:
	pathrelse(&path);

	if (result == IO_ERROR)
		return -EIO;

	/*
	 * this buffer has valid data, but isn't valid for io.  mapping it to
	 * block #0 tells the rest of reiserfs it just has a tail in it
	 */
	map_bh(bh_result, inode->i_sb, 0);
	set_buffer_uptodate(bh_result);
	return 0;
}

/*
 * this is called to create the file map, so _get_block_create_0 will not
 * read a direct item
 */
static int reiserfs_bmap(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	if (!file_capable(inode, block))
		return -EFBIG;

	reiserfs_write_lock(inode->i_sb);
	/* do not read the direct item */
	_get_block_create_0(inode, block, bh_result, 0);
	reiserfs_write_unlock(inode->i_sb);
	return 0;
}

/*
 * special version of get_block that is only used by grab_tail_page right
 * now.  It is sent to __block_write_begin, and when you try to get a
 * block past the end of the file (or a block from a hole) it returns
 * -ENOENT instead of a valid buffer.  __block_write_begin expects to
 * be able to do i/o on the buffers returned, unless an error value
 * is also returned.
 *
 * So, this allows __block_write_begin to be used for reading a single
 * block in a page, where it does not produce a valid page for holes or
 * past the end of the file.  This turns out to be exactly what we need
 * for reading tails for conversion.
 *
 * The point of the wrapper is forcing a certain value for create, even
 * though the VFS layer is calling this function with create == 1.  If you
 * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 * don't use this function.
 */
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
				       struct buffer_head *bh_result,
				       int create)
{
	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}

/*
 * This is a special helper for reiserfs_get_block in case we are executing
 * a direct_IO request.
 */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
					 sector_t iblock,
					 struct buffer_head *bh_result,
					 int create)
{
	int ret;

	bh_result->b_page = NULL;

	/*
	 * We set the b_size before the reiserfs_get_block call since it is
	 * referenced in convert_tail_for_hole() that may be called from
	 * reiserfs_get_block()
	 */
	bh_result->b_size = (1 << inode->i_blkbits);

	ret = reiserfs_get_block(inode, iblock, bh_result,
				 create | GET_BLOCK_NO_DANGLE);
	if (ret)
		goto out;

	/* don't allow direct io onto tail pages */
	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
		/*
		 * make sure future calls to the direct io funcs for this
		 * offset in the file fail by unmapping the buffer
		 */
		clear_buffer_mapped(bh_result);
		ret = -EINVAL;
	}

	/*
	 * Possible unpacked tail.  Flush the data before pages have
	 * disappeared
	 */
	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
		int err;

		reiserfs_write_lock(inode->i_sb);

		err = reiserfs_commit_for_inode(inode);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

		reiserfs_write_unlock(inode->i_sb);

		if (err < 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * helper function for when reiserfs_get_block is called for a hole
 * but the file tail is still in a direct item
 * bh_result is the buffer head for the hole
 * tail_offset is the offset of the start of the tail in the file
 *
 * This calls prepare_write, which will start a new transaction; you
 * should not be in a transaction, or have any paths held, when you
 * call this.
 */
static int convert_tail_for_hole(struct inode *inode,
				 struct buffer_head *bh_result,
				 loff_t tail_offset)
{
	unsigned long index;
	unsigned long tail_end;
	unsigned long tail_start;
	struct page *tail_page;
	struct page *hole_page = bh_result->b_page;
	int retval = 0;

	if ((tail_offset & (bh_result->b_size - 1)) != 1)
		return -EIO;

	/* always try to read until the end of the block */
	tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;

	index = tail_offset >> PAGE_CACHE_SHIFT;
	/*
	 * hole_page can be zero in case of direct_io; we are sure
	 * that we cannot get here if we write with O_DIRECT into a
	 * tail page
	 */
	if (!hole_page || index != hole_page->index) {
		tail_page = grab_cache_page(inode->i_mapping, index);
		retval = -ENOMEM;
		if (!tail_page) {
			goto out;
		}
	} else {
		tail_page = hole_page;
	}

	/*
	 * we don't have to make sure the conversion did not happen while
	 * we were locking the page because anyone that could convert
	 * must first take i_mutex.
	 *
	 * We must fix the tail page for writing because it might have buffers
	 * that are mapped, but have a block number of 0.  This indicates tail
	 * data that has been read directly into the page, and
	 * __block_write_begin won't trigger a get_block in this case.
	 */
	fix_tail_page_for_writing(tail_page);
	retval = __reiserfs_write_begin(tail_page, tail_start,
					tail_end - tail_start);
	if (retval)
		goto unlock;

	/* tail conversion might change the data in the page */
	flush_dcache_page(tail_page);

	retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);

unlock:
	if (tail_page != hole_page) {
		unlock_page(tail_page);
		page_cache_release(tail_page);
	}
out:
	return retval;
}

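/*
 * Allocate one unformatted-node block for reiserfs_get_block().  With
 * REISERFS_PREALLOCATE, the preallocation pool is used unless the
 * caller passed GET_BLOCK_NO_IMUX.
 */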
static inline int _allocate_block(struct reiserfs_transaction_handle *th,
				  sector_t block,
				  struct inode *inode,
				  b_blocknr_t *allocated_block_nr,
				  struct treepath *path, int flags)
{
	BUG_ON(!th->t_trans_id);

#ifdef REISERFS_PREALLOCATE
	if (!(flags & GET_BLOCK_NO_IMUX)) {
		return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
						  path, block);
	}
#endif
	return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
					 block);
}

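/*
 * reiserfs_get_block() is the get_block_t callback for reiserfs.
 * 'create' is really a bitmask: GET_BLOCK_CREATE requests allocation,
 * and the other GET_BLOCK_* flags (see reiserfs.h) adjust behaviour
 * for callers such as direct I/O and tail conversion.
 */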
int reiserfs_get_block(struct inode *inode, sector_t block,
		       struct buffer_head *bh_result, int create)
{
	int repeat, retval = 0;
	/* b_blocknr_t is (unsigned) 32 bit int */
	b_blocknr_t allocated_block_nr = 0;
	INITIALIZE_PATH(path);
	int pos_in_item;
	struct cpu_key key;
	struct buffer_head *bh, *unbh = NULL;
	struct item_head *ih, tmp_ih;
	__le32 *item;
	int done;
	int fs_gen;
	struct reiserfs_transaction_handle *th = NULL;
	/*
	 * space reserved in transaction batch:
	 * . 3 balancings in direct->indirect conversion
	 * . 1 block involved into reiserfs_update_sd()
	 * XXX in a practically impossible worst case direct2indirect()
	 * can incur (much) more than 3 balancings.
	 * quota update for user, group
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
	int version;
	int dangle = 1;
	loff_t new_offset =
	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;

	reiserfs_write_lock(inode->i_sb);
	version = get_inode_item_key_version(inode);

	if (!file_capable(inode, block)) {
		reiserfs_write_unlock(inode->i_sb);
		return -EFBIG;
	}

	/*
	 * if !create, we aren't changing the FS, so we don't need to
	 * log anything, so we don't need to start a transaction
	 */
	if (!(create & GET_BLOCK_CREATE)) {
		int ret;
		/* map the 'block'-th logical block of the file */
		ret = _get_block_create_0(inode, block, bh_result,
					  create | GET_BLOCK_READ_DIRECT);
		reiserfs_write_unlock(inode->i_sb);
		return ret;
	}

	/*
	 * if we're already in a transaction, make sure to close
	 * any new transactions we start in this func
	 */
	if ((create & GET_BLOCK_NO_DANGLE) ||
	    reiserfs_transaction_running(inode->i_sb))
		dangle = 0;

	/*
	 * If the file is of a size such that it might have a tail, and
	 * tails are enabled, we should mark it as possibly needing
	 * tail packing on close
	 */
	if ((have_large_tails(inode->i_sb)
	     && inode->i_size < i_block_size(inode) * 4)
	    || (have_small_tails(inode->i_sb)
		&& inode->i_size < i_block_size(inode)))
		REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;

	/* set the key of the first byte in the 'block'-th block of file */
	make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
	if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
start_trans:
		th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
		if (!th) {
			retval = -ENOMEM;
			goto failure;
		}
		reiserfs_update_inode_transaction(inode);
	}
research:

	retval = search_for_position_by_key(inode->i_sb, &key, &path);
	if (retval == IO_ERROR) {
		retval = -EIO;
		goto failure;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	item = tp_item_body(&path);
	pos_in_item = path.pos_in_item;

	fs_gen = get_generation(inode->i_sb);
	copy_item_head(&tmp_ih, ih);

	if (allocation_needed
	    (retval, allocated_block_nr, ih, item, pos_in_item)) {
		/* we have to allocate a block for the unformatted node */
		if (!th) {
			pathrelse(&path);
			goto start_trans;
		}

		repeat =
		    _allocate_block(th, block, inode, &allocated_block_nr,
				    &path, create);

		/*
		 * restart the transaction to give the journal a chance to free
		 * some blocks.  releases the path, so we have to go back to
		 * research if we succeed on the second try
		 */
		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
			retval = restart_transaction(th, inode, &path);
			if (retval)
				goto failure;
			repeat =
			    _allocate_block(th, block, inode,
					    &allocated_block_nr, NULL, create);

			if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
				goto research;
			}
			if (repeat == QUOTA_EXCEEDED)
				retval = -EDQUOT;
			else
				retval = -ENOSPC;
			goto failure;
		}

		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			goto research;
		}
	}

	if (indirect_item_found(retval, ih)) {
		b_blocknr_t unfm_ptr;
		/*
		 * 'block'-th block is in the file already (there is a
		 * corresponding cell in some indirect item).  But it may be
		 * a zero unformatted node pointer (a hole)
		 */
		unfm_ptr = get_block_num(item, pos_in_item);
		if (unfm_ptr == 0) {
			/* use the allocated block to plug the hole */
			reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
			if (fs_changed(fs_gen, inode->i_sb)
			    && item_moved(&tmp_ih, &path)) {
				reiserfs_restore_prepared_buffer(inode->i_sb,
								 bh);
				goto research;
			}
			set_buffer_new(bh_result);
			if (buffer_dirty(bh_result)
			    && reiserfs_data_ordered(inode->i_sb))
				reiserfs_add_ordered_list(inode, bh_result);
			put_block_num(item, pos_in_item, allocated_block_nr);
			unfm_ptr = allocated_block_nr;
			journal_mark_dirty(th, bh);
			reiserfs_update_sd(th, inode);
		}
		set_block_dev_mapped(bh_result, unfm_ptr, inode);
		pathrelse(&path);
		retval = 0;
		if (!dangle && th)
			retval = reiserfs_end_persistent_transaction(th);

		reiserfs_write_unlock(inode->i_sb);

		/*
		 * the item was found, so new blocks were not added to the
		 * file; there is no need to make sure the inode is updated
		 * with this transaction
		 */
		return retval;
	}

	if (!th) {
		pathrelse(&path);
		goto start_trans;
	}

	/*
	 * desired position is not found or is in the direct item.  We have
	 * to append the file with holes up to the 'block'-th block,
	 * converting direct items to an indirect item if necessary
	 */
	done = 0;
	do {
		if (is_statdata_le_ih(ih)) {
			__le32 unp = 0;
			struct cpu_key tmp_key;

			/* indirect item has to be inserted */
			make_le_item_head(&tmp_ih, &key, version, 1,
					  TYPE_INDIRECT, UNFM_P_SIZE,
					  0 /* free_space */ );

			/*
			 * we are going to add the 'block'-th block to the
			 * file.  Use the allocated block for that
			 */
			if (cpu_key_k_offset(&key) == 1) {
				unp = cpu_to_le32(allocated_block_nr);
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				set_buffer_new(bh_result);
				done = 1;
			}
			tmp_key = key;	/* ;) */
			set_cpu_key_k_offset(&tmp_key, 1);
			PATH_LAST_POSITION(&path)++;

			retval =
			    reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
						 inode, (char *)&unp);
			if (retval) {
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				/*
				 * retval == -ENOSPC, -EDQUOT or -EIO
				 * or -EEXIST
				 */
				goto failure;
			}
		} else if (is_direct_le_ih(ih)) {
			/* direct item has to be converted */
			loff_t tail_offset;

			tail_offset =
			    ((le_ih_k_offset(ih) -
			      1) & ~(inode->i_sb->s_blocksize - 1)) + 1;

			/*
			 * the direct item we just found fits into the block
			 * we have to map.  Convert it into an unformatted
			 * node: use bh_result for the conversion
			 */
			if (tail_offset == cpu_key_k_offset(&key)) {
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				unbh = bh_result;
				done = 1;
			} else {
				/*
				 * we have to pad the file tail stored in
				 * direct item(s) up to the block size and
				 * convert it to an unformatted node.
				 * FIXME: this should also get into the page
				 * cache
				 */

				pathrelse(&path);
				/*
				 * ugly, but we can only end the transaction if
				 * we aren't nested
				 */
				BUG_ON(!th->t_refcount);
				if (th->t_refcount == 1) {
					retval =
					    reiserfs_end_persistent_transaction
					    (th);
					th = NULL;
					if (retval)
						goto failure;
				}

				retval =
				    convert_tail_for_hole(inode, bh_result,
							  tail_offset);
				if (retval) {
					if (retval != -ENOSPC)
						reiserfs_error(inode->i_sb,
							       "clm-6004",
							       "convert tail failed "
							       "inode %lu, error %d",
							       inode->i_ino,
							       retval);
					if (allocated_block_nr) {
						/*
						 * the bitmap, the super,
						 * and the stat data == 3
						 */
						if (!th)
							th = reiserfs_persistent_transaction(inode->i_sb, 3);
						if (th)
							reiserfs_free_block(th,
									    inode,
									    allocated_block_nr,
									    1);
					}
					goto failure;
				}
				goto research;
			}
			retval =
			    direct2indirect(th, inode, &path, unbh,
					    tail_offset);
			if (retval) {
				reiserfs_unmap_buffer(unbh);
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				goto failure;
			}
			/*
			 * it is important that set_buffer_uptodate is done
			 * after the direct2indirect.  The buffer might
			 * contain valid data newer than the data on disk
			 * (read by readpage, changed, and then sent here by
			 * writepage).  direct2indirect needs to know if unbh
			 * was already up to date, so it can decide if the
			 * data in unbh needs to be replaced with data from
			 * the disk
			 */
			set_buffer_uptodate(unbh);

			/*
			 * unbh->b_page == NULL in case of a DIRECT_IO request;
			 * this means the buffer will disappear shortly, so it
			 * should not be added to the tail list
			 */
			if (unbh->b_page) {
				/*
				 * we've converted the tail, so we must
				 * flush unbh before the transaction commits
				 */
				reiserfs_add_tail_list(inode, unbh);

				/*
				 * mark it dirty now to prevent commit_write
				 * from adding this buffer to the inode's
				 * dirty buffer list
				 */
				/*
				 * AKPM: changed __mark_buffer_dirty to
				 * mark_buffer_dirty().  It's still atomic,
				 * but it sets the page dirty too, which makes
				 * it eligible for writeback at any time by the
				 * VM (which was also the case with
				 * __mark_buffer_dirty())
				 */
				mark_buffer_dirty(unbh);
			}
		} else {
			/*
			 * append indirect item with holes if needed; when
			 * appending the pointer to the 'block'-th block, use
			 * the block which is already allocated
			 */
			struct cpu_key tmp_key;
			/*
			 * We use this in case we need to allocate
			 * only one block, which is a fast path
			 */
			unp_t unf_single = 0;
			unp_t *un;
			__u64 max_to_insert =
			    MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
			    UNFM_P_SIZE;
			__u64 blocks_needed;

			RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
			       "vs-804: invalid position for append");
			/*
			 * indirect item has to be appended,
			 * set up key of that position
			 * (key type is unimportant)
			 */
			make_cpu_key(&tmp_key, inode,
				     le_key_k_offset(version,
						     &ih->ih_key) +
				     op_bytes_number(ih,
						     inode->i_sb->s_blocksize),
				     TYPE_INDIRECT, 3);

			RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
			       "green-805: invalid offset");
			blocks_needed =
			    1 +
			    ((cpu_key_k_offset(&key) -
			      cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
			     s_blocksize_bits);

			if (blocks_needed == 1) {
				un = &unf_single;
			} else {
				un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
				if (!un) {
					un = &unf_single;
					blocks_needed = 1;
					max_to_insert = 0;
				}
			}
			if (blocks_needed <= max_to_insert) {
				/*
				 * we are going to add the target block to
				 * the file.  Use the allocated block for that
				 */
				un[blocks_needed - 1] =
				    cpu_to_le32(allocated_block_nr);
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				set_buffer_new(bh_result);
				done = 1;
			} else {
				/* paste hole to the indirect item */
				/*
				 * If kmalloc failed, max_to_insert becomes
				 * zero and it means we only have space for
				 * one block
				 */
				blocks_needed =
				    max_to_insert ? max_to_insert : 1;
			}
			retval =
			    reiserfs_paste_into_item(th, &path, &tmp_key, inode,
						     (char *)un,
						     UNFM_P_SIZE *
						     blocks_needed);

			if (blocks_needed != 1)
				kfree(un);

			if (retval) {
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				goto failure;
			}
			if (!done) {
				/*
				 * We need to record the new file size in case
				 * this function is interrupted/aborted later
				 * on.  And we may do this only for holes.
				 */
				inode->i_size +=
				    inode->i_sb->s_blocksize * blocks_needed;
			}
		}

		if (done == 1)
			break;

		/*
		 * this loop could log more blocks than we had originally
		 * asked for.  So, we have to allow the transaction to end
		 * if it is too big or too full.  Update the inode so things
		 * are consistent if we crash before the function returns.
		 * Release the path so that anybody waiting on the path before
		 * ending their transaction will be able to continue.
		 */
		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
			retval = restart_transaction(th, inode, &path);
			if (retval)
				goto failure;
		}
		/*
		 * inserting indirect pointers for a hole can take a
		 * long time.  reschedule if needed and also release the write
		 * lock for others.
		 */
		reiserfs_cond_resched(inode->i_sb);

		retval = search_for_position_by_key(inode->i_sb, &key, &path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto failure;
		}
		if (retval == POSITION_FOUND) {
			reiserfs_warning(inode->i_sb, "vs-825",
					 "%K should not be found", &key);
			retval = -EEXIST;
			if (allocated_block_nr)
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
			pathrelse(&path);
			goto failure;
		}
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
		item = tp_item_body(&path);
		pos_in_item = path.pos_in_item;
	} while (1);

	retval = 0;

failure:
	if (th && (!dangle || (retval && !th->t_trans_id))) {
		int err;
		if (th->t_trans_id)
			reiserfs_update_sd(th, inode);
		err = reiserfs_end_persistent_transaction(th);
		if (err)
			retval = err;
	}

	reiserfs_write_unlock(inode->i_sb);
	reiserfs_check_path(&path);
	return retval;
}

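/*
 * Plug into the generic mpage machinery for multi-page read-ahead;
 * reiserfs_get_block() supplies the block mapping.
 */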
static int
reiserfs_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
}

/*
 * Compute the real number of bytes used by a file.
 * The following three functions can go away when we have enough space
 * in the stat item
 */
static int real_space_diff(struct inode *inode, int sd_size)
{
	int bytes;
	loff_t blocksize = inode->i_sb->s_blocksize;

	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
		return sd_size;

	/*
	 * The end of the file is also in a full block with an indirect
	 * reference, so round up to the next block.
	 *
	 * there is just no way to know if the tail is actually packed
	 * in the file, so we have to assume it isn't.  When we pack the
	 * tail, we add 4 bytes to pretend there really is an unformatted
	 * node pointer
	 */
	bytes =
	    ((inode->i_size +
	      (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
	    sd_size;
	return bytes;
}

static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
					int sd_size)
{
	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
		return inode->i_size +
		    (loff_t) (real_space_diff(inode, sd_size));
	}
	return ((loff_t) real_space_diff(inode, sd_size)) +
	    (((loff_t) blocks) << 9);
}

/* Compute the number of blocks used by a file in ReiserFS counting */
static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
{
	loff_t bytes = inode_get_bytes(inode);
	loff_t real_space = real_space_diff(inode, sd_size);

	/* keeps fsck and non-quota versions of reiserfs happy */
	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
		bytes += (loff_t) 511;
	}

	/*
	 * files from before the quota patch might have i_blocks such that
	 * bytes < real_space.  Deal with that here to prevent it from
	 * going negative.
	 */
	if (bytes < real_space)
		return 0;
	return (bytes - real_space) >> 9;
}

/*
 * BAD: new directories have stat data of the new type and all other items
 * of the old type.  The version stored in the inode describes the body
 * items, so in update_stat_data we can not rely on the inode, but have
 * to check the item version directly
 */

/* called by read_locked_inode */
static void init_inode(struct inode *inode, struct treepath *path)
{
	struct buffer_head *bh;
	struct item_head *ih;
	__u32 rdev;

	bh = PATH_PLAST_BUFFER(path);
	ih = tp_item_head(path);

	copy_key(INODE_PKEY(inode), &ih->ih_key);

	INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
	REISERFS_I(inode)->i_flags = 0;
	REISERFS_I(inode)->i_prealloc_block = 0;
	REISERFS_I(inode)->i_prealloc_count = 0;
	REISERFS_I(inode)->i_trans_id = 0;
	REISERFS_I(inode)->i_jl = NULL;
	reiserfs_init_xattr_rwsem(inode);

	if (stat_data_v1(ih)) {
		struct stat_data_v1 *sd =
		    (struct stat_data_v1 *)ih_item_body(bh, ih);
		unsigned long blocks;

		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
		set_inode_sd_version(inode, STAT_DATA_V1);
		inode->i_mode = sd_v1_mode(sd);
		set_nlink(inode, sd_v1_nlink(sd));
		i_uid_write(inode, sd_v1_uid(sd));
		i_gid_write(inode, sd_v1_gid(sd));
		inode->i_size = sd_v1_size(sd);
		inode->i_atime.tv_sec = sd_v1_atime(sd);
		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
		inode->i_ctime.tv_sec = sd_v1_ctime(sd);
		inode->i_atime.tv_nsec = 0;
		inode->i_ctime.tv_nsec = 0;
		inode->i_mtime.tv_nsec = 0;

		inode->i_blocks = sd_v1_blocks(sd);
		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
		blocks = (inode->i_size + 511) >> 9;
		blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);

		/*
		 * there was a bug in <= 3.5.23 where i_blocks could take
		 * negative values.  Starting from 3.5.17 this value could
		 * even be stored in the stat data.  For such files we set
		 * i_blocks based on file size.  Just 2 notes: this can be
		 * wrong for sparse files, and the on-disk value will only
		 * be updated if the file's inode ever changes
		 */
		if (inode->i_blocks > blocks) {
			inode->i_blocks = blocks;
		}

		rdev = sd_v1_rdev(sd);
		REISERFS_I(inode)->i_first_direct_byte =
		    sd_v1_first_direct_byte(sd);

		/*
		 * an early bug in the quota code can give us an odd
		 * number for the block count.  This is incorrect, fix it here.
		 */
		if (inode->i_blocks & 1) {
			inode->i_blocks++;
		}
		inode_set_bytes(inode,
				to_real_used_space(inode, inode->i_blocks,
						   SD_V1_SIZE));
		/*
		 * nopack is initially zero for v1 objects.  For v2 objects,
		 * nopack is initialised from sd_attrs
		 */
		REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
	} else {
		/*
		 * new stat data found, but the object may have old items
		 * (directories and symlinks)
		 */
		struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);

		inode->i_mode = sd_v2_mode(sd);
		set_nlink(inode, sd_v2_nlink(sd));
		i_uid_write(inode, sd_v2_uid(sd));
		inode->i_size = sd_v2_size(sd);
		i_gid_write(inode, sd_v2_gid(sd));
		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
		inode->i_atime.tv_sec = sd_v2_atime(sd);
		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
		inode->i_ctime.tv_nsec = 0;
		inode->i_mtime.tv_nsec = 0;
		inode->i_atime.tv_nsec = 0;
		inode->i_blocks = sd_v2_blocks(sd);
		rdev = sd_v2_rdev(sd);
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			inode->i_generation =
			    le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
		else
			inode->i_generation = sd_v2_generation(sd);

		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
			set_inode_item_key_version(inode, KEY_FORMAT_3_5);
		else
			set_inode_item_key_version(inode, KEY_FORMAT_3_6);
		REISERFS_I(inode)->i_first_direct_byte = 0;
		set_inode_sd_version(inode, STAT_DATA_V2);
		inode_set_bytes(inode,
				to_real_used_space(inode, inode->i_blocks,
						   SD_V2_SIZE));
		/*
		 * read persistent inode attributes from sd and initialise
		 * generic inode flags from them
		 */
		REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
	}

	pathrelse(path);
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &reiserfs_file_inode_operations;
		inode->i_fop = &reiserfs_file_operations;
		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &reiserfs_dir_inode_operations;
		inode->i_fop = &reiserfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &reiserfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
	} else {
		inode->i_blocks = 0;
		inode->i_op = &reiserfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
	}
}

/* update new stat data with inode fields */
static void inode2sd(void *sd, struct inode *inode, loff_t size)
{
	struct stat_data *sd_v2 = (struct stat_data *)sd;
	__u16 flags;

	set_sd_v2_mode(sd_v2, inode->i_mode);
	set_sd_v2_nlink(sd_v2, inode->i_nlink);
	set_sd_v2_uid(sd_v2, i_uid_read(inode));
	set_sd_v2_size(sd_v2, size);
	set_sd_v2_gid(sd_v2, i_gid_read(inode));
	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
	set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
	set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
	else
		set_sd_v2_generation(sd_v2, inode->i_generation);
	flags = REISERFS_I(inode)->i_attrs;
	i_attrs_to_sd_attrs(inode, &flags);
	set_sd_v2_attrs(sd_v2, flags);
}

/* used to copy the inode's fields to old stat data */
static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
{
	struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;

	set_sd_v1_mode(sd_v1, inode->i_mode);
	set_sd_v1_uid(sd_v1, i_uid_read(inode));
	set_sd_v1_gid(sd_v1, i_gid_read(inode));
	set_sd_v1_nlink(sd_v1, inode->i_nlink);
	set_sd_v1_size(sd_v1, size);
	set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
	set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
	set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
	else
		set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));

	/* Sigh. i_first_direct_byte is back */
	set_sd_v1_first_direct_byte(sd_v1,
				    REISERFS_I(inode)->i_first_direct_byte);
}

/*
 * NOTE, you must prepare the buffer head before sending it here,
 * and then log it after the call
 */
static void update_stat_data(struct treepath *path, struct inode *inode,
			     loff_t size)
{
	struct buffer_head *bh;
	struct item_head *ih;

	bh = PATH_PLAST_BUFFER(path);
	ih = tp_item_head(path);

	if (!is_statdata_le_ih(ih))
		reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
			       INODE_PKEY(inode), ih);

	/* path points to old stat data */
	if (stat_data_v1(ih)) {
		inode2sd_v1(ih_item_body(bh, ih), inode, size);
	} else {
		inode2sd(ih_item_body(bh, ih), inode, size);
	}

	return;
}

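/*
 * Find the stat data item for @inode and update it with the in-core
 * fields, using @size for the size field.  This loops because
 * reiserfs_prepare_for_journal() may schedule, and the stat data item
 * can move while we sleep.
 */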
void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
			     struct inode *inode, loff_t size)
{
	struct cpu_key key;
	INITIALIZE_PATH(path);
	struct buffer_head *bh;
	int fs_gen;
	struct item_head *ih, tmp_ih;
	int retval;

	BUG_ON(!th->t_trans_id);

	/* key type is unimportant */
	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);

	for (;;) {
		int pos;
		/* look for the object's stat data */
		retval = search_item(inode->i_sb, &key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(inode->i_sb, "vs-13050",
				       "i/o failure occurred trying to "
				       "update %K stat data", &key);
			return;
		}
		if (retval == ITEM_NOT_FOUND) {
			pos = PATH_LAST_POSITION(&path);
			pathrelse(&path);
			if (inode->i_nlink == 0) {
				/*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
				return;
			}
			reiserfs_warning(inode->i_sb, "vs-13060",
					 "stat data of object %k (nlink == %d) "
					 "not found (pos %d)",
					 INODE_PKEY(inode), inode->i_nlink,
					 pos);
			reiserfs_check_path(&path);
			return;
		}

		/*
		 * sigh, prepare_for_journal might schedule.  When it
		 * schedules the FS might change.  We have to detect that,
		 * and loop back to the search if the stat data item has moved
		 */
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
		copy_item_head(&tmp_ih, ih);
		fs_gen = get_generation(inode->i_sb);
		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);

		/* Stat_data item has been moved after scheduling. */
		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
			continue;
		}
		break;
	}
	update_stat_data(&path, inode, size);
	journal_mark_dirty(th, bh);
	pathrelse(&path);
	return;
}

/*
 * reiserfs_read_locked_inode is called to read the inode off disk, and it
 * does a make_bad_inode when things go wrong.  But, we need to make sure
 * to clear the key in the private portion of the inode, otherwise a
 * corresponding iput might try to delete whatever object the inode last
 * represented.
 */
static void reiserfs_make_bad_inode(struct inode *inode)
{
	memset(INODE_PKEY(inode), 0, KEY_SIZE);
	make_bad_inode(inode);
}

/*
 * initially this function was derived from minix or ext2's analog and
 * evolved as the prototype did
 */
int reiserfs_init_locked_inode(struct inode *inode, void *p)
{
	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
	inode->i_ino = args->objectid;
	INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
	return 0;
}

/*
 * looks for stat data in the tree, and fills up the stat data fields
 * of the in-core inode
 */
void reiserfs_read_locked_inode(struct inode *inode,
				struct reiserfs_iget_args *args)
{
	INITIALIZE_PATH(path_to_sd);
	struct cpu_key key;
	unsigned long dirino;
	int retval;

	dirino = args->dirid;

	/*
	 * set version 1; version 2 could be used too, because the stat
	 * data key is the same in both versions
	 */
	key.version = KEY_FORMAT_3_5;
	key.on_disk_key.k_dir_id = dirino;
	key.on_disk_key.k_objectid = inode->i_ino;
	key.on_disk_key.k_offset = 0;
	key.on_disk_key.k_type = 0;

	/* look for the object's stat data */
	retval = search_item(inode->i_sb, &key, &path_to_sd);
	if (retval == IO_ERROR) {
		reiserfs_error(inode->i_sb, "vs-13070",
			       "i/o failure occurred trying to find "
			       "stat data of %K", &key);
		reiserfs_make_bad_inode(inode);
		return;
	}

	/* a stale NFS handle can trigger this without it being an error */
	if (retval != ITEM_FOUND) {
		pathrelse(&path_to_sd);
		reiserfs_make_bad_inode(inode);
		clear_nlink(inode);
		return;
	}

	init_inode(inode, &path_to_sd);

	/*
	 * It is possible that knfsd is trying to access the inode of a file
	 * that is being removed from the disk by some other thread.  As we
	 * update sd on unlink, all that is required is to check for nlink
	 * here.  This bug was first found by Sizif when debugging
	 * SquidNG/Butterfly, forgotten, and found again after Philippe
	 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
	 *
	 * A more logical fix would require changes in fs/inode.c:iput() to
	 * remove the inode from the hash-table _after_ the fs cleaned disk
	 * stuff up, and in iget() to return NULL if an I_FREEING inode is
	 * found in the hash-table.
	 */

	/*
	 * Currently there is one place where it's ok to meet an inode with
	 * nlink == 0: processing of open-unlinked and half-truncated files
	 * during mount (fs/reiserfs/super.c:finish_unfinished()).
	 */
	if ((inode->i_nlink == 0) &&
	    !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
		reiserfs_warning(inode->i_sb, "vs-13075",
				 "dead inode read from disk %K. "
				 "This is likely to be race with knfsd. Ignore",
				 &key);
		reiserfs_make_bad_inode(inode);
	}

	/* init_inode() should have released the path */
	reiserfs_check_path(&path_to_sd);

	/*
	 * Stat data v1 doesn't support ACLs.
	 */
	if (get_inode_sd_version(inode) == STAT_DATA_V1)
		cache_no_acl(inode);
}

/*
 * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
 *
 * @inode: inode from hash table to check
 * @opaque: "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
 *
 * This function is called by iget5_locked() to distinguish reiserfs inodes
 * having the same inode numbers. Such inodes can only exist due to some
 * error condition. One of them should be bad. Inodes with identical
 * inode numbers (objectids) are distinguished by parent directory ids.
 */
int reiserfs_find_actor(struct inode *inode, void *opaque)
{
	struct reiserfs_iget_args *args;

	args = opaque;
	/* args is already in CPU order */
	return (inode->i_ino == args->objectid) &&
	    (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
}

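/*
 * Get the in-core inode for @key, reading the stat data from disk if it
 * is not already cached.  Returns ERR_PTR(-ENOMEM) if no inode can be
 * obtained, or NULL when the key does not match (e.g. a stale NFS
 * handle) or the on-disk inode could not be read.
 */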
struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
{
	struct inode *inode;
	struct reiserfs_iget_args args;
	int depth;

	args.objectid = key->on_disk_key.k_objectid;
	args.dirid = key->on_disk_key.k_dir_id;
	depth = reiserfs_write_unlock_nested(s);
	inode = iget5_locked(s, key->on_disk_key.k_objectid,
			     reiserfs_find_actor, reiserfs_init_locked_inode,
			     (void *)(&args));
	reiserfs_write_lock_nested(s, depth);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		reiserfs_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}

	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
		/* either due to i/o error or a stale NFS handle */
		iput(inode);
		inode = NULL;
	}
	return inode;
}

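/*
 * Common helper for the exportfs entry points below: turn an
 * (objectid, dir_id, generation) triple from a file handle into a
 * dentry.  A generation of 0 means "don't check the generation".
 */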
static struct dentry *reiserfs_get_dentry(struct super_block *sb,
					  u32 objectid, u32 dir_id,
					  u32 generation)
{
	struct cpu_key key;
	struct inode *inode;

	key.on_disk_key.k_objectid = objectid;
	key.on_disk_key.k_dir_id = dir_id;
	reiserfs_write_lock(sb);
	inode = reiserfs_iget(sb, &key);
	if (inode && !IS_ERR(inode) && generation != 0 &&
	    generation != inode->i_generation) {
		iput(inode);
		inode = NULL;
	}
	reiserfs_write_unlock(sb);

	return d_obtain_alias(inode);
}

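/*
 * exportfs ->fh_to_dentry: decode a handle produced by
 * reiserfs_encode_fh() and look up the object itself.
 */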
struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
				     int fh_len, int fh_type)
{
	/*
	 * fhtype happens to reflect the number of u32s encoded.
	 * due to a bug in earlier code, fhtype might indicate there
	 * are more u32s than actually fit.
	 * so if fhtype seems to be more than len, reduce fhtype.
	 * Valid types are:
	 *   2 - objectid + dir_id - legacy support
	 *   3 - objectid + dir_id + generation
	 *   4 - objectid + dir_id + objectid and dirid of parent - legacy
	 *   5 - objectid + dir_id + generation + objectid and dirid of parent
	 *   6 - as above plus generation of directory
	 * 6 does not fit in NFSv2 handles
	 */
	if (fh_type > fh_len) {
		if (fh_type != 6 || fh_len != 5)
			reiserfs_warning(sb, "reiserfs-13077",
					 "nfsd/reiserfs, fhtype=%d, len=%d - odd",
					 fh_type, fh_len);
		fh_type = fh_len;
	}
	if (fh_len < 2)
		return NULL;

	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
				   (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
}

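/*
 * exportfs ->fh_to_parent: only handle types 4-6 carry the parent's
 * objectid/dir_id (see the type table above), so anything shorter
 * cannot yield a parent.
 */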
struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
				     int fh_len, int fh_type)
{
	if (fh_type > fh_len)
		fh_type = fh_len;
	if (fh_type < 4)
		return NULL;

	return reiserfs_get_dentry(sb,
				   (fh_type >= 5) ? fid->raw[3] : fid->raw[2],
				   (fh_type >= 5) ? fid->raw[4] : fid->raw[3],
				   (fh_type == 6) ? fid->raw[5] : 0);
}

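/*
 * exportfs ->encode_fh: the handle layout matches the type table in
 * reiserfs_fh_to_dentry() above: objectid, dir_id, generation, then
 * optionally the parent's objectid, dir_id and generation.
 */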
1736 int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1737 struct inode *parent)
1738 {
1739 int maxlen = *lenp;
1740
1741 if (parent && (maxlen < 5)) {
1742 *lenp = 5;
1743 return FILEID_INVALID;
1744 } else if (maxlen < 3) {
1745 *lenp = 3;
1746 return FILEID_INVALID;
1747 }
1748
1749 data[0] = inode->i_ino;
1750 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1751 data[2] = inode->i_generation;
1752 *lenp = 3;
1753 if (parent) {
1754 data[3] = parent->i_ino;
1755 data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
1756 *lenp = 5;
1757 if (maxlen >= 6) {
1758 data[5] = parent->i_generation;
1759 *lenp = 6;
1760 }
1761 }
1762 return *lenp;
1763 }
1764
1765 /*
1766 * looks for stat data, then copies fields to it, marks the buffer
1767 * containing stat data as dirty
1768 */
1769 /*
1770 * reiserfs inodes are never really dirty, since the dirty inode call
1771 * always logs them. This call allows the VFS inode marking routines
1772 * to properly mark inodes for datasync and such, but only actually
1773 * does something when called for a synchronous update.
1774 */
1775 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1776 {
1777 struct reiserfs_transaction_handle th;
1778 int jbegin_count = 1;
1779
1780 if (inode->i_sb->s_flags & MS_RDONLY)
1781 return -EROFS;
1782 /*
1783 * memory pressure can sometimes initiate write_inode calls with
1784 * sync == 1,
1785 * these cases are just when the system needs ram, not when the
1786 * inode needs to reach disk for safety, and they can safely be
1787 * ignored because the altered inode has already been logged.
1788 */
1789 if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
1790 reiserfs_write_lock(inode->i_sb);
1791 if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1792 reiserfs_update_sd(&th, inode);
1793 journal_end_sync(&th);
1794 }
1795 reiserfs_write_unlock(inode->i_sb);
1796 }
1797 return 0;
1798 }
1799
1800 /*
1801 * stat data of new object is inserted already, this inserts the item
1802 * containing "." and ".." entries
1803 */
1804 static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
1805 struct inode *inode,
1806 struct item_head *ih, struct treepath *path,
1807 struct inode *dir)
1808 {
1809 struct super_block *sb = th->t_super;
1810 char empty_dir[EMPTY_DIR_SIZE];
1811 char *body = empty_dir;
1812 struct cpu_key key;
1813 int retval;
1814
1815 BUG_ON(!th->t_trans_id);
1816
1817 _make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
1818 le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
1819 TYPE_DIRENTRY, 3 /*key length */ );
1820
1821 /*
1822 * compose item head for new item. Directories consist of items of
1823 * old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
1824 * is done by reiserfs_new_inode
1825 */
1826 if (old_format_only(sb)) {
1827 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1828 TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
1829
1830 make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
1831 ih->ih_key.k_objectid,
1832 INODE_PKEY(dir)->k_dir_id,
1833 INODE_PKEY(dir)->k_objectid);
1834 } else {
1835 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1836 TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
1837
1838 make_empty_dir_item(body, ih->ih_key.k_dir_id,
1839 ih->ih_key.k_objectid,
1840 INODE_PKEY(dir)->k_dir_id,
1841 INODE_PKEY(dir)->k_objectid);
1842 }
1843
1844 /* look for place in the tree for new item */
1845 retval = search_item(sb, &key, path);
1846 if (retval == IO_ERROR) {
1847 reiserfs_error(sb, "vs-13080",
1848 "i/o failure occurred creating new directory");
1849 return -EIO;
1850 }
1851 if (retval == ITEM_FOUND) {
1852 pathrelse(path);
1853 reiserfs_warning(sb, "vs-13070",
1854 "object with this key exists (%k)",
1855 &(ih->ih_key));
1856 return -EEXIST;
1857 }
1858
1859 /* insert item, that is empty directory item */
1860 return reiserfs_insert_item(th, path, &key, ih, inode, body);
1861 }
1862
1863 /*
1864 * stat data of object has been inserted, this inserts the item
1865 * containing the body of symlink
1866 */
1867 static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
1868 struct inode *inode,
1869 struct item_head *ih,
1870 struct treepath *path, const char *symname,
1871 int item_len)
1872 {
1873 struct super_block *sb = th->t_super;
1874 struct cpu_key key;
1875 int retval;
1876
1877 BUG_ON(!th->t_trans_id);
1878
1879 _make_cpu_key(&key, KEY_FORMAT_3_5,
1880 le32_to_cpu(ih->ih_key.k_dir_id),
1881 le32_to_cpu(ih->ih_key.k_objectid),
1882 1, TYPE_DIRECT, 3 /*key length */ );
1883
1884 make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
1885 0 /*free_space */ );
1886
1887 /* look for place in the tree for new item */
1888 retval = search_item(sb, &key, path);
1889 if (retval == IO_ERROR) {
1890 reiserfs_error(sb, "vs-13080",
1891 "i/o failure occurred creating new symlink");
1892 return -EIO;
1893 }
1894 if (retval == ITEM_FOUND) {
1895 pathrelse(path);
1896 reiserfs_warning(sb, "vs-13080",
1897 "object with this key exists (%k)",
1898 &(ih->ih_key));
1899 return -EEXIST;
1900 }
1901
1902 /* insert item, that is body of symlink */
1903 return reiserfs_insert_item(th, path, &key, ih, inode, symname);
1904 }
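
/*
 * Note on the hard-coded offset 1 above: reiserfs keys use 1-based byte
 * offsets, so the symlink body becomes a single direct item covering
 * bytes [1, item_len] of the object.  Symlinks always get 3.5-format
 * keys (KEY_FORMAT_3_5) regardless of the disk format -- see the
 * set_inode_item_key_version() logic in reiserfs_new_inode() below.
 */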
1905
1906 /*
1907 * inserts the stat data into the tree, and then calls
1908 * reiserfs_new_directory (to insert ".", ".." item if new object is
1909 * directory) or reiserfs_new_symlink (to insert symlink body if new
1910 * object is symlink) or nothing (if new object is regular file)
1911 *
1912 * NOTE! uid and gid must already be set in the inode. If we return
1913 * non-zero due to an error, we have to drop the quota previously allocated
1914 * for the fresh inode. This can only be done outside a transaction, so
1915 * if we return non-zero, we also end the transaction.
1916 *
1917 * @th: active transaction handle
1918 * @dir: parent directory for new inode
1919 * @mode: mode of new inode
1920 * @symname: symlink contents if inode is symlink
1921 * @i_size: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
1922 * symlinks
1923 * @inode: inode to be filled
1924 * @security: optional security context to associate with this inode
1925 */
1926 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1927 struct inode *dir, umode_t mode, const char *symname,
1928 /* 0 for regular, EMPTY_DIR_SIZE for dirs,
1929 strlen(symname) for symlinks */
1930 loff_t i_size, struct dentry *dentry,
1931 struct inode *inode,
1932 struct reiserfs_security_handle *security)
1933 {
1934 struct super_block *sb = dir->i_sb;
1935 struct reiserfs_iget_args args;
1936 INITIALIZE_PATH(path_to_key);
1937 struct cpu_key key;
1938 struct item_head ih;
1939 struct stat_data sd;
1940 int retval;
1941 int err;
1942 int depth;
1943
1944 BUG_ON(!th->t_trans_id);
1945
1946 depth = reiserfs_write_unlock_nested(sb);
1947 err = dquot_alloc_inode(inode);
1948 reiserfs_write_lock_nested(sb, depth);
1949 if (err)
1950 goto out_end_trans;
1951 if (!dir->i_nlink) {
1952 err = -EPERM;
1953 goto out_bad_inode;
1954 }
1955
1956 /* item head of new item */
1957 ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
1958 ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
1959 if (!ih.ih_key.k_objectid) {
1960 err = -ENOMEM;
1961 goto out_bad_inode;
1962 }
1963 args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1964 if (old_format_only(sb))
1965 make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
1966 TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
1967 else
1968 make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
1969 TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
1970 memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
1971 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1972
1973 depth = reiserfs_write_unlock_nested(inode->i_sb);
1974 err = insert_inode_locked4(inode, args.objectid,
1975 reiserfs_find_actor, &args);
1976 reiserfs_write_lock_nested(inode->i_sb, depth);
1977 if (err) {
1978 err = -EINVAL;
1979 goto out_bad_inode;
1980 }
1981
1982 if (old_format_only(sb))
1983 /*
1984 * not a perfect generation count, as object ids can be reused,
1985 * but this is as good as reiserfs can do right now.
1986 * note that the private part of inode isn't filled in yet,
1987 * we have to use the directory.
1988 */
1989 inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
1990 else
1991 #if defined(USE_INODE_GENERATION_COUNTER)
1992 inode->i_generation =
1993 le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
1994 #else
1995 inode->i_generation = ++event;
1996 #endif
1997
1998 /* fill stat data */
1999 set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));
2000
2001 /* uid and gid must already be set by the caller for quota init */
2002
2003 /* symlink cannot be immutable or append only, right? */
2004 if (S_ISLNK(inode->i_mode))
2005 inode->i_flags &= ~(S_IMMUTABLE | S_APPEND);
2006
2007 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
2008 inode->i_size = i_size;
2009 inode->i_blocks = 0;
2010 inode->i_bytes = 0;
2011 REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
2012 U32_MAX /* NO_BYTES_IN_DIRECT_ITEM */;
2013
2014 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
2015 REISERFS_I(inode)->i_flags = 0;
2016 REISERFS_I(inode)->i_prealloc_block = 0;
2017 REISERFS_I(inode)->i_prealloc_count = 0;
2018 REISERFS_I(inode)->i_trans_id = 0;
2019 REISERFS_I(inode)->i_jl = NULL;
2020 REISERFS_I(inode)->i_attrs =
2021 REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
2022 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
2023 reiserfs_init_xattr_rwsem(inode);
2024
2025 /* key to search for correct place for new stat data */
2026 _make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
2027 le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
2028 TYPE_STAT_DATA, 3 /*key length */ );
2029
2030 /* find proper place for inserting of stat data */
2031 retval = search_item(sb, &key, &path_to_key);
2032 if (retval == IO_ERROR) {
2033 err = -EIO;
2034 goto out_bad_inode;
2035 }
2036 if (retval == ITEM_FOUND) {
2037 pathrelse(&path_to_key);
2038 err = -EEXIST;
2039 goto out_bad_inode;
2040 }
2041 if (old_format_only(sb)) {
2042 /* i_uid or i_gid is too big to be stored in stat data v3.5 */
2043 if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
2044 pathrelse(&path_to_key);
2045 err = -EINVAL;
2046 goto out_bad_inode;
2047 }
2048 inode2sd_v1(&sd, inode, inode->i_size);
2049 } else {
2050 inode2sd(&sd, inode, inode->i_size);
2051 }
2052 /*
2053 * store in the in-core inode the key of the stat data and the version
2054 * all object items will have (directory items keep the old offset
2055 * format; other new objects consist of new items)
2056 */
2057 if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
2058 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
2059 else
2060 set_inode_item_key_version(inode, KEY_FORMAT_3_6);
2061 if (old_format_only(sb))
2062 set_inode_sd_version(inode, STAT_DATA_V1);
2063 else
2064 set_inode_sd_version(inode, STAT_DATA_V2);
2065
2066 /* insert the stat data into the tree */
2067 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2068 if (REISERFS_I(dir)->new_packing_locality)
2069 th->displace_new_blocks = 1;
2070 #endif
2071 retval =
2072 reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
2073 (char *)(&sd));
2074 if (retval) {
2075 err = retval;
2076 reiserfs_check_path(&path_to_key);
2077 goto out_bad_inode;
2078 }
2079 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2080 if (!th->displace_new_blocks)
2081 REISERFS_I(dir)->new_packing_locality = 0;
2082 #endif
2083 if (S_ISDIR(mode)) {
2084 /* insert item with "." and ".." */
2085 retval =
2086 reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
2087 }
2088
2089 if (S_ISLNK(mode)) {
2090 /* insert body of symlink */
2091 if (!old_format_only(sb))
2092 i_size = ROUND_UP(i_size);
2093 retval =
2094 reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
2095 i_size);
2096 }
2097 if (retval) {
2098 err = retval;
2099 reiserfs_check_path(&path_to_key);
2100 journal_end(th);
2101 goto out_inserted_sd;
2102 }
2103
2104 if (reiserfs_posixacl(inode->i_sb)) {
2105 reiserfs_write_unlock(inode->i_sb);
2106 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
2107 reiserfs_write_lock(inode->i_sb);
2108 if (retval) {
2109 err = retval;
2110 reiserfs_check_path(&path_to_key);
2111 journal_end(th);
2112 goto out_inserted_sd;
2113 }
2114 } else if (inode->i_sb->s_flags & MS_POSIXACL) {
2115 reiserfs_warning(inode->i_sb, "jdm-13090",
2116 "ACLs aren't enabled in the fs, "
2117 "but vfs thinks they are!");
2118 } else if (IS_PRIVATE(dir))
2119 inode->i_flags |= S_PRIVATE;
2120
2121 if (security->name) {
2122 reiserfs_write_unlock(inode->i_sb);
2123 retval = reiserfs_security_write(th, inode, security);
2124 reiserfs_write_lock(inode->i_sb);
2125 if (retval) {
2126 err = retval;
2127 reiserfs_check_path(&path_to_key);
2128 retval = journal_end(th);
2129 if (retval)
2130 err = retval;
2131 goto out_inserted_sd;
2132 }
2133 }
2134
2135 reiserfs_update_sd(th, inode);
2136 reiserfs_check_path(&path_to_key);
2137
2138 return 0;
2139
2140 out_bad_inode:
2141 /* Invalidate the object, nothing was inserted yet */
2142 INODE_PKEY(inode)->k_objectid = 0;
2143
2144 /* Quota change must be inside a transaction for journaling */
2145 depth = reiserfs_write_unlock_nested(inode->i_sb);
2146 dquot_free_inode(inode);
2147 reiserfs_write_lock_nested(inode->i_sb, depth);
2148
2149 out_end_trans:
2150 journal_end(th);
2151 /*
2152 * Drop can be outside and it needs more credits so it's better
2153 * to have it outside
2154 */
2155 depth = reiserfs_write_unlock_nested(inode->i_sb);
2156 dquot_drop(inode);
2157 reiserfs_write_lock_nested(inode->i_sb, depth);
2158 inode->i_flags |= S_NOQUOTA;
2159 make_bad_inode(inode);
2160
2161 out_inserted_sd:
2162 clear_nlink(inode);
2163 th->t_trans_id = 0; /* so the caller can't use this handle later */
2164 unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
2165 iput(inode);
2166 return err;
2167 }
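
/*
 * Rough sketch of the caller contract described in the NOTE above
 * (reiserfs_create() and friends follow roughly this shape; details
 * elided):
 *
 *	retval = journal_begin(&th, dir->i_sb, jbegin_count);
 *	...
 *	retval = reiserfs_new_inode(&th, dir, mode, NULL, 0,
 *				    dentry, inode, &security);
 *	if (retval)
 *		goto out_failed;	// th is dead: t_trans_id == 0,
 *					// transaction already ended
 *
 * On failure the caller must not call journal_end() on @th again.
 */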
2168
2169 /*
2170 * finds the tail page in the page cache,
2171 * reads the last block in.
2172 *
2173 * On success, page_result is set to a locked, pinned page, and bh_result
2174 * is set to an up to date buffer for the last block in the file. returns 0.
2175 *
2176 * tail conversion is not done, so bh_result might not be valid for writing;
2177 * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
2178 * trying to write the block.
2179 *
2180 * on failure, nonzero is returned, page_result and bh_result are untouched.
2181 */
2182 static int grab_tail_page(struct inode *inode,
2183 struct page **page_result,
2184 struct buffer_head **bh_result)
2185 {
2186
2187 /*
2188 * we want the page with the last byte in the file,
2189 * not the page that will hold the next byte for appending
2190 */
2191 unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
2192 unsigned long pos = 0;
2193 unsigned long start = 0;
2194 unsigned long blocksize = inode->i_sb->s_blocksize;
2195 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
2196 struct buffer_head *bh;
2197 struct buffer_head *head;
2198 struct page *page;
2199 int error;
2200
2201 /*
2202 * we know that we are only called with inode->i_size > 0.
2203 * we also know that a file tail can never be as big as a block.
2204 * If i_size % blocksize == 0, our file is currently block aligned
2205 * and it won't need converting or zeroing after a truncate.
2206 */
2207 if ((offset & (blocksize - 1)) == 0) {
2208 return -ENOENT;
2209 }
2210 page = grab_cache_page(inode->i_mapping, index);
2211 error = -ENOMEM;
2212 if (!page) {
2213 goto out;
2214 }
2215 /* start within the page of the last block in the file */
2216 start = (offset / blocksize) * blocksize;
2217
2218 error = __block_write_begin(page, start, offset - start,
2219 reiserfs_get_block_create_0);
2220 if (error)
2221 goto unlock;
2222
2223 head = page_buffers(page);
2224 bh = head;
2225 do {
2226 if (pos >= start) {
2227 break;
2228 }
2229 bh = bh->b_this_page;
2230 pos += blocksize;
2231 } while (bh != head);
2232
2233 if (!buffer_uptodate(bh)) {
2234 /*
2235 * note, this should never happen, prepare_write should be
2236 * taking care of this for us. If the buffer isn't up to
2237 * date, I've screwed up the code to find the buffer, or the
2238 * code to call prepare_write
2239 */
2240 reiserfs_error(inode->i_sb, "clm-6000",
2241 "error reading block %lu", bh->b_blocknr);
2242 error = -EIO;
2243 goto unlock;
2244 }
2245 *bh_result = bh;
2246 *page_result = page;
2247
2248 out:
2249 return error;
2250
2251 unlock:
2252 unlock_page(page);
2253 page_cache_release(page);
2254 return error;
2255 }
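
/*
 * Minimal usage sketch for the contract above (essentially what
 * reiserfs_truncate_file() does below):
 *
 *	struct page *page;
 *	struct buffer_head *bh;
 *
 *	if (grab_tail_page(inode, &page, &bh) == 0) {
 *		if (buffer_mapped(bh) && bh->b_blocknr != 0)
 *			mark_buffer_dirty(bh);	// real unformatted node
 *		unlock_page(page);
 *		page_cache_release(page);	// drop the pinned reference
 *	}
 */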
2256
2257 /*
2258 * vfs version of truncate file. Must NOT be called with
2259 * a transaction already started.
2260 *
2261 * some code taken from block_truncate_page
2262 */
2263 int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2264 {
2265 struct reiserfs_transaction_handle th;
2266 /* we want the offset for the first byte after the end of the file */
2267 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
2268 unsigned blocksize = inode->i_sb->s_blocksize;
2269 unsigned length;
2270 struct page *page = NULL;
2271 int error;
2272 struct buffer_head *bh = NULL;
2273 int err2;
2274
2275 reiserfs_write_lock(inode->i_sb);
2276
2277 if (inode->i_size > 0) {
2278 error = grab_tail_page(inode, &page, &bh);
2279 if (error) {
2280 /*
2281 * -ENOENT means we truncated past the end of the
2282 * file, and get_block_create_0 could not find a
2283 * block to read in, which is ok.
2284 */
2285 if (error != -ENOENT)
2286 reiserfs_error(inode->i_sb, "clm-6001",
2287 "grab_tail_page failed %d",
2288 error);
2289 page = NULL;
2290 bh = NULL;
2291 }
2292 }
2293
2294 /*
2295 * so, if page != NULL, we have a buffer head for the offset at
2296 * the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
2297 * then we have an unformatted node. Otherwise, we have a direct item,
2298 * and no zeroing is required on disk. We zero after the truncate,
2299 * because the truncate might pack the item anyway
2300 * (it will unmap bh if it packs).
2301 *
2302 * it is enough to reserve space in transaction for 2 balancings:
2303 * one for "save" link adding and another for the first
2304 * cut_from_item. The extra 1 is for update_sd.
2305 */
2306 error = journal_begin(&th, inode->i_sb,
2307 JOURNAL_PER_BALANCE_CNT * 2 + 1);
2308 if (error)
2309 goto out;
2310 reiserfs_update_inode_transaction(inode);
2311 if (update_timestamps)
2312 /*
2313 * we are doing real truncate: if the system crashes
2314 * before the last transaction of truncating gets committed
2315 * - on reboot the file either appears truncated properly
2316 * or not truncated at all
2317 */
2318 add_save_link(&th, inode, 1);
2319 err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
2320 error = journal_end(&th);
2321 if (error)
2322 goto out;
2323
2324 /* check reiserfs_do_truncate after ending the transaction */
2325 if (err2) {
2326 error = err2;
2327 goto out;
2328 }
2329
2330 if (update_timestamps) {
2331 error = remove_save_link(inode, 1 /* truncate */);
2332 if (error)
2333 goto out;
2334 }
2335
2336 if (page) {
2337 length = offset & (blocksize - 1);
2338 /* if we are not on a block boundary */
2339 if (length) {
2340 length = blocksize - length;
2341 zero_user(page, offset, length);
2342 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2343 mark_buffer_dirty(bh);
2344 }
2345 }
2346 unlock_page(page);
2347 page_cache_release(page);
2348 }
2349
2350 reiserfs_write_unlock(inode->i_sb);
2351
2352 return 0;
2353 out:
2354 if (page) {
2355 unlock_page(page);
2356 page_cache_release(page);
2357 }
2358
2359 reiserfs_write_unlock(inode->i_sb);
2360
2361 return error;
2362 }
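
/*
 * Worked example for the zeroing math above, assuming 4K pages and a 1K
 * block size with i_size == 10000: offset = 10000 & 4095 = 1808, so
 * length = 1808 & 1023 = 784, then length = 1024 - 784 = 240, and
 * zero_user() clears bytes [1808, 2048) -- the rest of the block that
 * holds the last byte of the file.
 */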
2363
2364 static int map_block_for_writepage(struct inode *inode,
2365 struct buffer_head *bh_result,
2366 unsigned long block)
2367 {
2368 struct reiserfs_transaction_handle th;
2369 int fs_gen;
2370 struct item_head tmp_ih;
2371 struct item_head *ih;
2372 struct buffer_head *bh;
2373 __le32 *item;
2374 struct cpu_key key;
2375 INITIALIZE_PATH(path);
2376 int pos_in_item;
2377 int jbegin_count = JOURNAL_PER_BALANCE_CNT;
2378 loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
2379 int retval;
2380 int use_get_block = 0;
2381 int bytes_copied = 0;
2382 int copy_size;
2383 int trans_running = 0;
2384
2385 /*
2386 * catch places below that try to log something without
2387 * starting a trans
2388 */
2389 th.t_trans_id = 0;
2390
2391 if (!buffer_uptodate(bh_result)) {
2392 return -EIO;
2393 }
2394
2395 kmap(bh_result->b_page);
2396 start_over:
2397 reiserfs_write_lock(inode->i_sb);
2398 make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
2399
2400 research:
2401 retval = search_for_position_by_key(inode->i_sb, &key, &path);
2402 if (retval != POSITION_FOUND) {
2403 use_get_block = 1;
2404 goto out;
2405 }
2406
2407 bh = get_last_bh(&path);
2408 ih = tp_item_head(&path);
2409 item = tp_item_body(&path);
2410 pos_in_item = path.pos_in_item;
2411
2412 /* we've found an unformatted node */
2413 if (indirect_item_found(retval, ih)) {
2414 if (bytes_copied > 0) {
2415 reiserfs_warning(inode->i_sb, "clm-6002",
2416 "bytes_copied %d", bytes_copied);
2417 }
2418 if (!get_block_num(item, pos_in_item)) {
2419 /* crap, we are writing to a hole */
2420 use_get_block = 1;
2421 goto out;
2422 }
2423 set_block_dev_mapped(bh_result,
2424 get_block_num(item, pos_in_item), inode);
2425 } else if (is_direct_le_ih(ih)) {
2426 char *p;
2427 p = page_address(bh_result->b_page);
2428 p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1);
2429 copy_size = ih_item_len(ih) - pos_in_item;
2430
2431 fs_gen = get_generation(inode->i_sb);
2432 copy_item_head(&tmp_ih, ih);
2433
2434 if (!trans_running) {
2435 /* vs-3050 is gone, no need to drop the path */
2436 retval = journal_begin(&th, inode->i_sb, jbegin_count);
2437 if (retval)
2438 goto out;
2439 reiserfs_update_inode_transaction(inode);
2440 trans_running = 1;
2441 if (fs_changed(fs_gen, inode->i_sb)
2442 && item_moved(&tmp_ih, &path)) {
2443 reiserfs_restore_prepared_buffer(inode->i_sb,
2444 bh);
2445 goto research;
2446 }
2447 }
2448
2449 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
2450
2451 if (fs_changed(fs_gen, inode->i_sb)
2452 && item_moved(&tmp_ih, &path)) {
2453 reiserfs_restore_prepared_buffer(inode->i_sb, bh);
2454 goto research;
2455 }
2456
2457 memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
2458 copy_size);
2459
2460 journal_mark_dirty(&th, bh);
2461 bytes_copied += copy_size;
2462 set_block_dev_mapped(bh_result, 0, inode);
2463
2464 /* are there still bytes left? */
2465 if (bytes_copied < bh_result->b_size &&
2466 (byte_offset + bytes_copied) < inode->i_size) {
2467 set_cpu_key_k_offset(&key,
2468 cpu_key_k_offset(&key) +
2469 copy_size);
2470 goto research;
2471 }
2472 } else {
2473 reiserfs_warning(inode->i_sb, "clm-6003",
2474 "bad item inode %lu", inode->i_ino);
2475 retval = -EIO;
2476 goto out;
2477 }
2478 retval = 0;
2479
2480 out:
2481 pathrelse(&path);
2482 if (trans_running) {
2483 int err = journal_end(&th);
2484 if (err)
2485 retval = err;
2486 trans_running = 0;
2487 }
2488 reiserfs_write_unlock(inode->i_sb);
2489
2490 /* this is where we fill in holes in the file. */
2491 if (use_get_block) {
2492 retval = reiserfs_get_block(inode, block, bh_result,
2493 GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
2494 | GET_BLOCK_NO_DANGLE);
2495 if (!retval) {
2496 if (!buffer_mapped(bh_result)
2497 || bh_result->b_blocknr == 0) {
2498 /* get_block failed to find a mapped unformatted node. */
2499 use_get_block = 0;
2500 goto start_over;
2501 }
2502 }
2503 }
2504 kunmap(bh_result->b_page);
2505
2506 if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
2507 /*
2508 * we've copied data from the page into the direct item, so the
2509 * buffer in the page is now clean, mark it to reflect that.
2510 */
2511 lock_buffer(bh_result);
2512 clear_buffer_dirty(bh_result);
2513 unlock_buffer(bh_result);
2514 }
2515 return retval;
2516 }
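
/*
 * Resulting buffer states, for reference:
 *
 *	mapped, b_blocknr != 0: a real unformatted node; the caller can
 *		submit it for IO normally
 *	mapped, b_blocknr == 0: the data lived in a direct item and was
 *		copied into the journal above; the page copy is now clean
 *	nonzero return: error, the buffer state should not be trusted
 */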
2517
2518 /*
2519 * mason@suse.com: updated in 2.5.54 to follow the same general io
2520 * start/recovery path as __block_write_full_page, along with special
2521 * code to handle reiserfs tails.
2522 */
2523 static int reiserfs_write_full_page(struct page *page,
2524 struct writeback_control *wbc)
2525 {
2526 struct inode *inode = page->mapping->host;
2527 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
2528 int error = 0;
2529 unsigned long block;
2530 sector_t last_block;
2531 struct buffer_head *head, *bh;
2532 int partial = 0;
2533 int nr = 0;
2534 int checked = PageChecked(page);
2535 struct reiserfs_transaction_handle th;
2536 struct super_block *s = inode->i_sb;
2537 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
2538 th.t_trans_id = 0;
2539
2540 /* no logging allowed when nonblocking or from PF_MEMALLOC */
2541 if (checked && (current->flags & PF_MEMALLOC)) {
2542 redirty_page_for_writepage(wbc, page);
2543 unlock_page(page);
2544 return 0;
2545 }
2546
2547 /*
2548 * The page dirty bit is cleared before writepage is called, which
2549 * means we have to tell create_empty_buffers to make dirty buffers.
2550 * The page really should be up to date at this point, so tossing
2551 * in the BH_Uptodate is just a sanity check.
2552 */
2553 if (!page_has_buffers(page)) {
2554 create_empty_buffers(page, s->s_blocksize,
2555 (1 << BH_Dirty) | (1 << BH_Uptodate));
2556 }
2557 head = page_buffers(page);
2558
2559 /*
2560 * last page in the file, zero out any contents past the
2561 * last byte in the file
2562 */
2563 if (page->index >= end_index) {
2564 unsigned last_offset;
2565
2566 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
2567 /* no file contents in this page */
2568 if (page->index >= end_index + 1 || !last_offset) {
2569 unlock_page(page);
2570 return 0;
2571 }
2572 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
2573 }
2574 bh = head;
2575 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
2576 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2577 /* first map all the buffers, logging any direct items we find */
2578 do {
2579 if (block > last_block) {
2580 /*
2581 * This can happen when the block size is less than
2582 * the page size. The corresponding bytes in the page
2583 * were zero filled above
2584 */
2585 clear_buffer_dirty(bh);
2586 set_buffer_uptodate(bh);
2587 } else if ((checked || buffer_dirty(bh)) &&
2588 (!buffer_mapped(bh) || (buffer_mapped(bh)
2589 && bh->b_blocknr ==
2590 0))) {
2591 /*
2592 * not mapped yet, or it points to a direct item, search
2593 * the btree for the mapping info, and log any direct
2594 * items found
2595 */
2596 if ((error = map_block_for_writepage(inode, bh, block))) {
2597 goto fail;
2598 }
2599 }
2600 bh = bh->b_this_page;
2601 block++;
2602 } while (bh != head);
2603
2604 /*
2605 * we start the transaction after map_block_for_writepage,
2606 * because it can create holes in the file (an unbounded operation).
2607 * starting it here, we can make a reliable estimate for how many
2608 * blocks we're going to log
2609 */
2610 if (checked) {
2611 ClearPageChecked(page);
2612 reiserfs_write_lock(s);
2613 error = journal_begin(&th, s, bh_per_page + 1);
2614 if (error) {
2615 reiserfs_write_unlock(s);
2616 goto fail;
2617 }
2618 reiserfs_update_inode_transaction(inode);
2619 }
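	/*
	 * worked example for the credit estimate above: with 4K pages and
	 * a 1K block size, bh_per_page is 4, so a checked page reserves
	 * 4 + 1 journal blocks -- one per buffer that may be logged, plus
	 * one extra
	 */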
2620 /* now go through and lock any dirty buffers on the page */
2621 do {
2622 get_bh(bh);
2623 if (!buffer_mapped(bh))
2624 continue;
2625 if (buffer_mapped(bh) && bh->b_blocknr == 0)
2626 continue;
2627
2628 if (checked) {
2629 reiserfs_prepare_for_journal(s, bh, 1);
2630 journal_mark_dirty(&th, bh);
2631 continue;
2632 }
2633 /*
2634 * from this point on, we know the buffer is mapped to a
2635 * real block and not a direct item
2636 */
2637 if (wbc->sync_mode != WB_SYNC_NONE) {
2638 lock_buffer(bh);
2639 } else {
2640 if (!trylock_buffer(bh)) {
2641 redirty_page_for_writepage(wbc, page);
2642 continue;
2643 }
2644 }
2645 if (test_clear_buffer_dirty(bh)) {
2646 mark_buffer_async_write(bh);
2647 } else {
2648 unlock_buffer(bh);
2649 }
2650 } while ((bh = bh->b_this_page) != head);
2651
2652 if (checked) {
2653 error = journal_end(&th);
2654 reiserfs_write_unlock(s);
2655 if (error)
2656 goto fail;
2657 }
2658 BUG_ON(PageWriteback(page));
2659 set_page_writeback(page);
2660 unlock_page(page);
2661
2662 /*
2663 * since any buffer might be the only dirty buffer on the page,
2664 * the first submit_bh can bring the page out of writeback.
2665 * be careful with the buffers.
2666 */
2667 do {
2668 struct buffer_head *next = bh->b_this_page;
2669 if (buffer_async_write(bh)) {
2670 submit_bh(WRITE, bh);
2671 nr++;
2672 }
2673 put_bh(bh);
2674 bh = next;
2675 } while (bh != head);
2676
2677 error = 0;
2678 done:
2679 if (nr == 0) {
2680 /*
2681 * if this page only had a direct item, it is very possible for
2682 * no io to be required without there being an error. Or,
2683 * someone else could have locked the buffers and sent them down
2684 * the pipe without locking the page
2685 */
2686 bh = head;
2687 do {
2688 if (!buffer_uptodate(bh)) {
2689 partial = 1;
2690 break;
2691 }
2692 bh = bh->b_this_page;
2693 } while (bh != head);
2694 if (!partial)
2695 SetPageUptodate(page);
2696 end_page_writeback(page);
2697 }
2698 return error;
2699
2700 fail:
2701 /*
2702 * catches various errors, we need to make sure any valid dirty blocks
2703 * get to the media. The page is currently locked and not marked for
2704 * writeback
2705 */
2706 ClearPageUptodate(page);
2707 bh = head;
2708 do {
2709 get_bh(bh);
2710 if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
2711 lock_buffer(bh);
2712 mark_buffer_async_write(bh);
2713 } else {
2714 /*
2715 * clear any dirty bits that might have come from
2716 * getting attached to a dirty page
2717 */
2718 clear_buffer_dirty(bh);
2719 }
2720 bh = bh->b_this_page;
2721 } while (bh != head);
2722 SetPageError(page);
2723 BUG_ON(PageWriteback(page));
2724 set_page_writeback(page);
2725 unlock_page(page);
2726 do {
2727 struct buffer_head *next = bh->b_this_page;
2728 if (buffer_async_write(bh)) {
2729 clear_buffer_dirty(bh);
2730 submit_bh(WRITE, bh);
2731 nr++;
2732 }
2733 put_bh(bh);
2734 bh = next;
2735 } while (bh != head);
2736 goto done;
2737 }
2738
2739 static int reiserfs_readpage(struct file *f, struct page *page)
2740 {
2741 return block_read_full_page(page, reiserfs_get_block);
2742 }
2743
2744 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
2745 {
2746 struct inode *inode = page->mapping->host;
2747 reiserfs_wait_on_write_block(inode->i_sb);
2748 return reiserfs_write_full_page(page, wbc);
2749 }
2750
2751 static void reiserfs_truncate_failed_write(struct inode *inode)
2752 {
2753 truncate_inode_pages(inode->i_mapping, inode->i_size);
2754 reiserfs_truncate_file(inode, 0);
2755 }
2756
2757 static int reiserfs_write_begin(struct file *file,
2758 struct address_space *mapping,
2759 loff_t pos, unsigned len, unsigned flags,
2760 struct page **pagep, void **fsdata)
2761 {
2762 struct inode *inode;
2763 struct page *page;
2764 pgoff_t index;
2765 int ret;
2766 int old_ref = 0;
2767
2768 inode = mapping->host;
2769 *fsdata = NULL;
2770 if (flags & AOP_FLAG_CONT_EXPAND &&
2771 (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
2772 pos++;
2773 *fsdata = (void *)(unsigned long)flags;
2774 }
2775
2776 index = pos >> PAGE_CACHE_SHIFT;
2777 page = grab_cache_page_write_begin(mapping, index, flags);
2778 if (!page)
2779 return -ENOMEM;
2780 *pagep = page;
2781
2782 reiserfs_wait_on_write_block(inode->i_sb);
2783 fix_tail_page_for_writing(page);
2784 if (reiserfs_transaction_running(inode->i_sb)) {
2785 struct reiserfs_transaction_handle *th;
2786 th = (struct reiserfs_transaction_handle *)current->
2787 journal_info;
2788 BUG_ON(!th->t_refcount);
2789 BUG_ON(!th->t_trans_id);
2790 old_ref = th->t_refcount;
2791 th->t_refcount++;
2792 }
2793 ret = __block_write_begin(page, pos, len, reiserfs_get_block);
2794 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2795 struct reiserfs_transaction_handle *th = current->journal_info;
2796 /*
2797 * this gets a little ugly. If reiserfs_get_block returned an
2798 * error and left a transaction running, we've got to close
2799 * it, and we've got to free the handle if it was a persistent
2800 * transaction.
2801 *
2802 * But, if we had nested into an existing transaction, we need
2803 * to just drop the ref count on the handle.
2804 *
2805 * If old_ref == 0, the transaction is from reiserfs_get_block,
2806 * and it was a persistent trans. Otherwise, it was nested
2807 * above.
2808 */
2809 if (th->t_refcount > old_ref) {
2810 if (old_ref)
2811 th->t_refcount--;
2812 else {
2813 int err;
2814 reiserfs_write_lock(inode->i_sb);
2815 err = reiserfs_end_persistent_transaction(th);
2816 reiserfs_write_unlock(inode->i_sb);
2817 if (err)
2818 ret = err;
2819 }
2820 }
2821 }
2822 if (ret) {
2823 unlock_page(page);
2824 page_cache_release(page);
2825 /* Truncate allocated blocks */
2826 reiserfs_truncate_failed_write(inode);
2827 }
2828 return ret;
2829 }
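
/*
 * Summary of the error cleanup above, assuming reiserfs_get_block()
 * failed and bumped the handle's refcount:
 *
 *	old_ref == 0: the transaction was started by reiserfs_get_block
 *		itself as a persistent handle -> end it here
 *	old_ref  > 0: reiserfs_get_block nested into the transaction we
 *		reference-counted at the top -> just drop the extra ref
 */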
2830
2831 int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
2832 {
2833 struct inode *inode = page->mapping->host;
2834 int ret;
2835 int old_ref = 0;
2836 int depth;
2837
2838 depth = reiserfs_write_unlock_nested(inode->i_sb);
2839 reiserfs_wait_on_write_block(inode->i_sb);
2840 reiserfs_write_lock_nested(inode->i_sb, depth);
2841
2842 fix_tail_page_for_writing(page);
2843 if (reiserfs_transaction_running(inode->i_sb)) {
2844 struct reiserfs_transaction_handle *th;
2845 th = (struct reiserfs_transaction_handle *)current->
2846 journal_info;
2847 BUG_ON(!th->t_refcount);
2848 BUG_ON(!th->t_trans_id);
2849 old_ref = th->t_refcount;
2850 th->t_refcount++;
2851 }
2852
2853 ret = __block_write_begin(page, from, len, reiserfs_get_block);
2854 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2855 struct reiserfs_transaction_handle *th = current->journal_info;
2856 /*
2857 * this gets a little ugly. If reiserfs_get_block returned an
2858 * error and left a transaction running, we've got to close
2859 * it, and we've got to free the handle if it was a persistent
2860 * transaction.
2861 *
2862 * But, if we had nested into an existing transaction, we need
2863 * to just drop the ref count on the handle.
2864 *
2865 * If old_ref == 0, the transaction is from reiserfs_get_block,
2866 * and it was a persistent trans. Otherwise, it was nested
2867 * above.
2868 */
2869 if (th->t_refcount > old_ref) {
2870 if (old_ref)
2871 th->t_refcount--;
2872 else {
2873 int err;
2874 reiserfs_write_lock(inode->i_sb);
2875 err = reiserfs_end_persistent_transaction(th);
2876 reiserfs_write_unlock(inode->i_sb);
2877 if (err)
2878 ret = err;
2879 }
2880 }
2881 }
2882 return ret;
2883
2884 }
2885
2886 static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
2887 {
2888 return generic_block_bmap(as, block, reiserfs_bmap);
2889 }
2890
2891 static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2892 loff_t pos, unsigned len, unsigned copied,
2893 struct page *page, void *fsdata)
2894 {
2895 struct inode *inode = page->mapping->host;
2896 int ret = 0;
2897 int update_sd = 0;
2898 struct reiserfs_transaction_handle *th;
2899 unsigned start;
2900 bool locked = false;
2901
2902 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
2903 pos++;
2904
2905 reiserfs_wait_on_write_block(inode->i_sb);
2906 if (reiserfs_transaction_running(inode->i_sb))
2907 th = current->journal_info;
2908 else
2909 th = NULL;
2910
2911 start = pos & (PAGE_CACHE_SIZE - 1);
2912 if (unlikely(copied < len)) {
2913 if (!PageUptodate(page))
2914 copied = 0;
2915
2916 page_zero_new_buffers(page, start + copied, start + len);
2917 }
2918 flush_dcache_page(page);
2919
2920 reiserfs_commit_page(inode, page, start, start + copied);
2921
2922 /*
2923 * generic_commit_write does this for us, but does not update the
2924 * transaction tracking stuff when the size changes. So, we have
2925 * to do the i_size updates here.
2926 */
2927 if (pos + copied > inode->i_size) {
2928 struct reiserfs_transaction_handle myth;
2929 reiserfs_write_lock(inode->i_sb);
2930 locked = true;
2931 /*
2932 * If the file has grown beyond the boundary where it
2933 * can have a tail, unmark it as needing tail
2934 * packing
2935 */
2936 if ((have_large_tails(inode->i_sb)
2937 && inode->i_size > i_block_size(inode) * 4)
2938 || (have_small_tails(inode->i_sb)
2939 && inode->i_size > i_block_size(inode)))
2940 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2941
2942 ret = journal_begin(&myth, inode->i_sb, 1);
2943 if (ret)
2944 goto journal_error;
2945
2946 reiserfs_update_inode_transaction(inode);
2947 inode->i_size = pos + copied;
2948 /*
2949 * this will just nest into our transaction. It's important
2950 * to use mark_inode_dirty so the inode gets pushed around on
2951 * the dirty lists, and so that O_SYNC works as expected
2952 */
2953 mark_inode_dirty(inode);
2954 reiserfs_update_sd(&myth, inode);
2955 update_sd = 1;
2956 ret = journal_end(&myth);
2957 if (ret)
2958 goto journal_error;
2959 }
2960 if (th) {
2961 if (!locked) {
2962 reiserfs_write_lock(inode->i_sb);
2963 locked = true;
2964 }
2965 if (!update_sd)
2966 mark_inode_dirty(inode);
2967 ret = reiserfs_end_persistent_transaction(th);
2968 if (ret)
2969 goto out;
2970 }
2971
2972 out:
2973 if (locked)
2974 reiserfs_write_unlock(inode->i_sb);
2975 unlock_page(page);
2976 page_cache_release(page);
2977
2978 if (pos + len > inode->i_size)
2979 reiserfs_truncate_failed_write(inode);
2980
2981 return ret == 0 ? copied : ret;
2982
2983 journal_error:
2984 reiserfs_write_unlock(inode->i_sb);
2985 locked = false;
2986 if (th) {
2987 if (!update_sd)
2988 reiserfs_update_sd(th, inode);
2989 ret = reiserfs_end_persistent_transaction(th);
2990 }
2991 goto out;
2992 }
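
/*
 * Worked example for the tail-packing threshold above: with 4K blocks
 * and large tails enabled, a file that grows past 16K (4 * blocksize)
 * clears i_pack_on_close_mask, so the final release won't try to repack
 * the tail; with small tails the cutoff is a single block.
 */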
2993
2994 int reiserfs_commit_write(struct file *f, struct page *page,
2995 unsigned from, unsigned to)
2996 {
2997 struct inode *inode = page->mapping->host;
2998 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
2999 int ret = 0;
3000 int update_sd = 0;
3001 struct reiserfs_transaction_handle *th = NULL;
3002 int depth;
3003
3004 depth = reiserfs_write_unlock_nested(inode->i_sb);
3005 reiserfs_wait_on_write_block(inode->i_sb);
3006 reiserfs_write_lock_nested(inode->i_sb, depth);
3007
3008 if (reiserfs_transaction_running(inode->i_sb)) {
3009 th = current->journal_info;
3010 }
3011 reiserfs_commit_page(inode, page, from, to);
3012
3013 /*
3014 * generic_commit_write does this for us, but does not update the
3015 * transaction tracking stuff when the size changes. So, we have
3016 * to do the i_size updates here.
3017 */
3018 if (pos > inode->i_size) {
3019 struct reiserfs_transaction_handle myth;
3020 /*
3021 * If the file has grown beyond the boundary where it
3022 * can have a tail, unmark it as needing tail
3023 * packing
3024 */
3025 if ((have_large_tails(inode->i_sb)
3026 && inode->i_size > i_block_size(inode) * 4)
3027 || (have_small_tails(inode->i_sb)
3028 && inode->i_size > i_block_size(inode)))
3029 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
3030
3031 ret = journal_begin(&myth, inode->i_sb, 1);
3032 if (ret)
3033 goto journal_error;
3034
3035 reiserfs_update_inode_transaction(inode);
3036 inode->i_size = pos;
3037 /*
3038 * this will just nest into our transaction. It's important
3039 * to use mark_inode_dirty so the inode gets pushed around
3040 * on the dirty lists, and so that O_SYNC works as expected
3041 */
3042 mark_inode_dirty(inode);
3043 reiserfs_update_sd(&myth, inode);
3044 update_sd = 1;
3045 ret = journal_end(&myth);
3046 if (ret)
3047 goto journal_error;
3048 }
3049 if (th) {
3050 if (!update_sd)
3051 mark_inode_dirty(inode);
3052 ret = reiserfs_end_persistent_transaction(th);
3053 if (ret)
3054 goto out;
3055 }
3056
3057 out:
3058 return ret;
3059
3060 journal_error:
3061 if (th) {
3062 if (!update_sd)
3063 reiserfs_update_sd(th, inode);
3064 ret = reiserfs_end_persistent_transaction(th);
3065 }
3066
3067 return ret;
3068 }
3069
3070 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
3071 {
3072 if (reiserfs_attrs(inode->i_sb)) {
3073 if (sd_attrs & REISERFS_SYNC_FL)
3074 inode->i_flags |= S_SYNC;
3075 else
3076 inode->i_flags &= ~S_SYNC;
3077 if (sd_attrs & REISERFS_IMMUTABLE_FL)
3078 inode->i_flags |= S_IMMUTABLE;
3079 else
3080 inode->i_flags &= ~S_IMMUTABLE;
3081 if (sd_attrs & REISERFS_APPEND_FL)
3082 inode->i_flags |= S_APPEND;
3083 else
3084 inode->i_flags &= ~S_APPEND;
3085 if (sd_attrs & REISERFS_NOATIME_FL)
3086 inode->i_flags |= S_NOATIME;
3087 else
3088 inode->i_flags &= ~S_NOATIME;
3089 if (sd_attrs & REISERFS_NOTAIL_FL)
3090 REISERFS_I(inode)->i_flags |= i_nopack_mask;
3091 else
3092 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
3093 }
3094 }
3095
3096 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs)
3097 {
3098 if (reiserfs_attrs(inode->i_sb)) {
3099 if (inode->i_flags & S_IMMUTABLE)
3100 *sd_attrs |= REISERFS_IMMUTABLE_FL;
3101 else
3102 *sd_attrs &= ~REISERFS_IMMUTABLE_FL;
3103 if (inode->i_flags & S_SYNC)
3104 *sd_attrs |= REISERFS_SYNC_FL;
3105 else
3106 *sd_attrs &= ~REISERFS_SYNC_FL;
3107 if (inode->i_flags & S_NOATIME)
3108 *sd_attrs |= REISERFS_NOATIME_FL;
3109 else
3110 *sd_attrs &= ~REISERFS_NOATIME_FL;
3111 if (REISERFS_I(inode)->i_flags & i_nopack_mask)
3112 *sd_attrs |= REISERFS_NOTAIL_FL;
3113 else
3114 *sd_attrs &= ~REISERFS_NOTAIL_FL;
3115 }
3116 }
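
/*
 * The two helpers above are mirror images: sd_attrs_to_i_attrs() runs
 * when the stat data comes in (see reiserfs_new_inode() above and the
 * iget path), i_attrs_to_sd_attrs() when attribute changes are pushed
 * back out, e.g. (sketch):
 *
 *	__u16 sd_attrs = 0;
 *	i_attrs_to_sd_attrs(inode, &sd_attrs);	// S_IMMUTABLE ->
 *						// REISERFS_IMMUTABLE_FL, etc.
 *
 * Only the NOTAIL bit lives in the reiserfs-private i_flags rather than
 * the generic inode flags.
 */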
3117
3118 /*
3119 * decide if this buffer needs to stay around for data logging or ordered
3120 * write purposes
3121 */
3122 static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
3123 {
3124 int ret = 1;
3125 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3126
3127 lock_buffer(bh);
3128 spin_lock(&j->j_dirty_buffers_lock);
3129 if (!buffer_mapped(bh)) {
3130 goto free_jh;
3131 }
3132 /*
3133 * the page is locked, and the only places that log a data buffer
3134 * also lock the page.
3135 */
3136 if (reiserfs_file_data_log(inode)) {
3137 /*
3138 * very conservative, leave the buffer pinned if
3139 * anyone might need it.
3140 */
3141 if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
3142 ret = 0;
3143 }
3144 } else if (buffer_dirty(bh)) {
3145 struct reiserfs_journal_list *jl;
3146 struct reiserfs_jh *jh = bh->b_private;
3147
3148 /*
3149 * why is this safe?
3150 * reiserfs_setattr updates i_size in the on disk
3151 * stat data before allowing vmtruncate to be called.
3152 *
3153 * If buffer was put onto the ordered list for this
3154 * transaction, we know for sure either this transaction
3155 * or an older one already has updated i_size on disk,
3156 * and this ordered data won't be referenced in the file
3157 * if we crash.
3158 *
3159 * if the buffer was put onto the ordered list for an older
3160 * transaction, we need to leave it around
3161 */
3162 if (jh && (jl = jh->jl)
3163 && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
3164 ret = 0;
3165 }
3166 free_jh:
3167 if (ret && bh->b_private) {
3168 reiserfs_free_jh(bh);
3169 }
3170 spin_unlock(&j->j_dirty_buffers_lock);
3171 unlock_buffer(bh);
3172 return ret;
3173 }
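
/*
 * Rough decision table for the logic above:
 *
 *	data logging + buffer journaled or journal-dirty -> keep (ret 0)
 *	ordinary dirty buffer on an older transaction's ordered list
 *		-> keep, an older commit still needs it
 *	anything else -> droppable; a leftover jh is freed on the way out
 */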
3174
3175 /* clm -- taken from fs/buffer.c:block_invalidate_page */
3176 static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3177 unsigned int length)
3178 {
3179 struct buffer_head *head, *bh, *next;
3180 struct inode *inode = page->mapping->host;
3181 unsigned int curr_off = 0;
3182 unsigned int stop = offset + length;
3183 int partial_page = (offset || length < PAGE_CACHE_SIZE);
3184 int ret = 1;
3185
3186 BUG_ON(!PageLocked(page));
3187
3188 if (!partial_page)
3189 ClearPageChecked(page);
3190
3191 if (!page_has_buffers(page))
3192 goto out;
3193
3194 head = page_buffers(page);
3195 bh = head;
3196 do {
3197 unsigned int next_off = curr_off + bh->b_size;
3198 next = bh->b_this_page;
3199
3200 if (next_off > stop)
3201 goto out;
3202
3203 /*
3204 * is this block fully invalidated?
3205 */
3206 if (offset <= curr_off) {
3207 if (invalidatepage_can_drop(inode, bh))
3208 reiserfs_unmap_buffer(bh);
3209 else
3210 ret = 0;
3211 }
3212 curr_off = next_off;
3213 bh = next;
3214 } while (bh != head);
3215
3216 /*
3217 * We release buffers only if the entire page is being invalidated.
3218 * The get_block cached value has been unconditionally invalidated,
3219 * so real IO is not possible anymore.
3220 */
3221 if (!partial_page && ret) {
3222 ret = try_to_release_page(page, 0);
3223 /* maybe should BUG_ON(!ret); - neilb */
3224 }
3225 out:
3226 return;
3227 }
3228
3229 static int reiserfs_set_page_dirty(struct page *page)
3230 {
3231 struct inode *inode = page->mapping->host;
3232 if (reiserfs_file_data_log(inode)) {
3233 SetPageChecked(page);
3234 return __set_page_dirty_nobuffers(page);
3235 }
3236 return __set_page_dirty_buffers(page);
3237 }
3238
3239 /*
3240 * Returns 1 if the page's buffers were dropped. The page is locked.
3241 *
3242 * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
3243 * in the buffers at page_buffers(page).
3244 *
3245 * even in -o notail mode, we can't be sure an old mount without -o notail
3246 * didn't create files with tails.
3247 */
3248 static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3249 {
3250 struct inode *inode = page->mapping->host;
3251 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3252 struct buffer_head *head;
3253 struct buffer_head *bh;
3254 int ret = 1;
3255
3256 WARN_ON(PageChecked(page));
3257 spin_lock(&j->j_dirty_buffers_lock);
3258 head = page_buffers(page);
3259 bh = head;
3260 do {
3261 if (bh->b_private) {
3262 if (!buffer_dirty(bh) && !buffer_locked(bh)) {
3263 reiserfs_free_jh(bh);
3264 } else {
3265 ret = 0;
3266 break;
3267 }
3268 }
3269 bh = bh->b_this_page;
3270 } while (bh != head);
3271 if (ret)
3272 ret = try_to_free_buffers(page);
3273 spin_unlock(&j->j_dirty_buffers_lock);
3274 return ret;
3275 }
3276
3277 /*
3278 * We thank Mingming Cao for helping us understand in great detail what
3279 * to do in this section of the code.
3280 */
3281 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
3282 struct iov_iter *iter, loff_t offset)
3283 {
3284 struct file *file = iocb->ki_filp;
3285 struct inode *inode = file->f_mapping->host;
3286 size_t count = iov_iter_count(iter);
3287 ssize_t ret;
3288
3289 ret = blockdev_direct_IO(iocb, inode, iter, offset,
3290 reiserfs_get_blocks_direct_io);
3291
3292 /*
3293 * In case of error extending write may have instantiated a few
3294 * blocks outside i_size. Trim these off again.
3295 */
3296 if (unlikely((rw & WRITE) && ret < 0)) {
3297 loff_t isize = i_size_read(inode);
3298 loff_t end = offset + count;
3299
3300 if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
3301 truncate_setsize(inode, isize);
3302 reiserfs_vfs_truncate_file(inode);
3303 }
3304 }
3305
3306 return ret;
3307 }
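
/*
 * Worked example for the trim above: with i_size at 1 MiB, a failing
 * O_DIRECT write of 64K at offset 1 MiB may have instantiated blocks
 * past EOF before the error; end (offset + count) > isize, so the file
 * is truncated back to the old 1 MiB and the stray blocks are released.
 */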
3308
3309 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3310 {
3311 struct inode *inode = dentry->d_inode;
3312 unsigned int ia_valid;
3313 int error;
3314
3315 error = inode_change_ok(inode, attr);
3316 if (error)
3317 return error;
3318
3319 /* must be turned off for recursive notify_change calls */
3320 ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
3321
3322 if (is_quota_modification(inode, attr))
3323 dquot_initialize(inode);
3324 reiserfs_write_lock(inode->i_sb);
3325 if (attr->ia_valid & ATTR_SIZE) {
3326 /*
3327 * version 2 items will be caught by the s_maxbytes check
3328 * done for us in vmtruncate
3329 */
3330 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3331 attr->ia_size > MAX_NON_LFS) {
3332 reiserfs_write_unlock(inode->i_sb);
3333 error = -EFBIG;
3334 goto out;
3335 }
3336
3337 inode_dio_wait(inode);
3338
3339 /* fill in hole pointers in the expanding truncate case. */
3340 if (attr->ia_size > inode->i_size) {
3341 error = generic_cont_expand_simple(inode, attr->ia_size);
3342 if (REISERFS_I(inode)->i_prealloc_count > 0) {
3343 int err;
3344 struct reiserfs_transaction_handle th;
3345 /* we're changing at most 2 bitmaps, inode + super */
3346 err = journal_begin(&th, inode->i_sb, 4);
3347 if (!err) {
3348 reiserfs_discard_prealloc(&th, inode);
3349 err = journal_end(&th);
3350 }
3351 if (err)
3352 error = err;
3353 }
3354 if (error) {
3355 reiserfs_write_unlock(inode->i_sb);
3356 goto out;
3357 }
3358 /*
3359 * file size is changed, ctime and mtime are
3360 * to be updated
3361 */
3362 attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
3363 }
3364 }
3365 reiserfs_write_unlock(inode->i_sb);
3366
3367 if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
3368 ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
3369 (get_inode_sd_version(inode) == STAT_DATA_V1)) {
3370 /* stat data of format v3.5 has 16 bit uid and gid */
3371 error = -EINVAL;
3372 goto out;
3373 }
3374
3375 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3376 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3377 struct reiserfs_transaction_handle th;
3378 int jbegin_count =
3379 2 *
3380 (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
3381 REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
3382 2;
3383
3384 error = reiserfs_chown_xattrs(inode, attr);
3385
3386 if (error)
3387 return error;
3388
3389 /*
3390 * (user+group)*(old+new) quota updates - init blocks for the new
3391 * IDs, del blocks for the old ones - plus the inode write (sb, inode)
3392 */
3393 reiserfs_write_lock(inode->i_sb);
3394 error = journal_begin(&th, inode->i_sb, jbegin_count);
3395 reiserfs_write_unlock(inode->i_sb);
3396 if (error)
3397 goto out;
3398 error = dquot_transfer(inode, attr);
3399 reiserfs_write_lock(inode->i_sb);
3400 if (error) {
3401 journal_end(&th);
3402 reiserfs_write_unlock(inode->i_sb);
3403 goto out;
3404 }
3405
3406 /*
3407 * Update corresponding info in inode so that everything
3408 * is in one transaction
3409 */
3410 if (attr->ia_valid & ATTR_UID)
3411 inode->i_uid = attr->ia_uid;
3412 if (attr->ia_valid & ATTR_GID)
3413 inode->i_gid = attr->ia_gid;
3414 mark_inode_dirty(inode);
3415 error = journal_end(&th);
3416 reiserfs_write_unlock(inode->i_sb);
3417 if (error)
3418 goto out;
3419 }
3420
3421 if ((attr->ia_valid & ATTR_SIZE) &&
3422 attr->ia_size != i_size_read(inode)) {
3423 error = inode_newsize_ok(inode, attr->ia_size);
3424 if (!error) {
3425 /*
3426 * Could race against reiserfs_file_release
3427 * if called from NFS, so take tailpack mutex.
3428 */
3429 mutex_lock(&REISERFS_I(inode)->tailpack);
3430 truncate_setsize(inode, attr->ia_size);
3431 reiserfs_truncate_file(inode, 1);
3432 mutex_unlock(&REISERFS_I(inode)->tailpack);
3433 }
3434 }
3435
3436 if (!error) {
3437 setattr_copy(inode, attr);
3438 mark_inode_dirty(inode);
3439 }
3440
3441 if (!error && reiserfs_posixacl(inode->i_sb)) {
3442 if (attr->ia_valid & ATTR_MODE)
3443 error = reiserfs_acl_chmod(inode);
3444 }
3445
3446 out:
3447 return error;
3448 }
3449
3450 const struct address_space_operations reiserfs_address_space_operations = {
3451 .writepage = reiserfs_writepage,
3452 .readpage = reiserfs_readpage,
3453 .readpages = reiserfs_readpages,
3454 .releasepage = reiserfs_releasepage,
3455 .invalidatepage = reiserfs_invalidatepage,
3456 .write_begin = reiserfs_write_begin,
3457 .write_end = reiserfs_write_end,
3458 .bmap = reiserfs_aop_bmap,
3459 .direct_IO = reiserfs_direct_IO,
3460 .set_page_dirty = reiserfs_set_page_dirty,
3461 };