]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - fs/btrfs/tree-log.c
Btrfs: add missing error handling after doing leaf/node binary search
[mirror_ubuntu-hirsute-kernel.git] / fs / btrfs / tree-log.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
11 #include "ctree.h"
12 #include "tree-log.h"
13 #include "disk-io.h"
14 #include "locking.h"
15 #include "print-tree.h"
16 #include "backref.h"
17 #include "compression.h"
18 #include "qgroup.h"
19 #include "inode-map.h"
20
21 /* magic values for the inode_only field in btrfs_log_inode:
22 *
23 * LOG_INODE_ALL means to log everything
24 * LOG_INODE_EXISTS means to log just enough to recreate the inode
25 * during log replay
26 */
27 #define LOG_INODE_ALL 0
28 #define LOG_INODE_EXISTS 1
29 #define LOG_OTHER_INODE 2
30 #define LOG_OTHER_INODE_ALL 3
31
32 /*
33 * directory trouble cases
34 *
35 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
36 * log, we must force a full commit before doing an fsync of the directory
37 * where the unlink was done.
38 * ---> record transid of last unlink/rename per directory
39 *
40 * mkdir foo/some_dir
41 * normal commit
42 * rename foo/some_dir foo2/some_dir
43 * mkdir foo/some_dir
44 * fsync foo/some_dir/some_file
45 *
46 * The fsync above will unlink the original some_dir without recording
47 * it in its new location (foo2). After a crash, some_dir will be gone
48 * unless the fsync of some_file forces a full commit
49 *
50 * 2) we must log any new names for any file or dir that is in the fsync
51 * log. ---> check inode while renaming/linking.
52 *
53 * 2a) we must log any new names for any file or dir during rename
54 * when the directory they are being removed from was logged.
55 * ---> check inode and old parent dir during rename
56 *
57 * 2a is actually the more important variant. With the extra logging
58 * a crash might unlink the old name without recreating the new one
59 *
60 * 3) after a crash, we must go through any directories with a link count
61 * of zero and redo the rm -rf
62 *
63 * mkdir f1/foo
64 * normal commit
65 * rm -rf f1/foo
66 * fsync(f1)
67 *
68 * The directory f1 was fully removed from the FS, but fsync was never
69 * called on f1, only its parent dir. After a crash the rm -rf must
70 * be replayed. This must be able to recurse down the entire
71 * directory tree. The inode link count fixup code takes care of the
72 * ugly details.
73 */
74
75 /*
76 * stages for the tree walking. The first
77 * stage (0) is to only pin down the blocks we find
78 * the second stage (1) is to make sure that all the inodes
79 * we find in the log are created in the subvolume.
80 *
81 * The last stage is to deal with directories and links and extents
82 * and all the other fun semantics
83 */
84 #define LOG_WALK_PIN_ONLY 0
85 #define LOG_WALK_REPLAY_INODES 1
86 #define LOG_WALK_REPLAY_DIR_INDEX 2
87 #define LOG_WALK_REPLAY_ALL 3
88
89 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
90 struct btrfs_root *root, struct btrfs_inode *inode,
91 int inode_only,
92 const loff_t start,
93 const loff_t end,
94 struct btrfs_log_ctx *ctx);
95 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
96 struct btrfs_root *root,
97 struct btrfs_path *path, u64 objectid);
98 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root,
100 struct btrfs_root *log,
101 struct btrfs_path *path,
102 u64 dirid, int del_all);
103
104 /*
105 * tree logging is a special write ahead log used to make sure that
106 * fsyncs and O_SYNCs can happen without doing full tree commits.
107 *
108 * Full tree commits are expensive because they require commonly
109 * modified blocks to be recowed, creating many dirty pages in the
110 * extent tree an 4x-6x higher write load than ext3.
111 *
112 * Instead of doing a tree commit on every fsync, we use the
113 * key ranges and transaction ids to find items for a given file or directory
114 * that have changed in this transaction. Those items are copied into
115 * a special tree (one per subvolume root), that tree is written to disk
116 * and then the fsync is considered complete.
117 *
118 * After a crash, items are copied out of the log-tree back into the
119 * subvolume tree. Any file data extents found are recorded in the extent
120 * allocation tree, and the log-tree freed.
121 *
122 * The log tree is read three times, once to pin down all the extents it is
123 * using in ram and once, once to create all the inodes logged in the tree
124 * and once to do all the other items.
125 */
126
127 /*
128 * start a sub transaction and setup the log tree
129 * this increments the log tree writer count to make the people
130 * syncing the tree wait for us to finish
131 */
132 static int start_log_trans(struct btrfs_trans_handle *trans,
133 struct btrfs_root *root,
134 struct btrfs_log_ctx *ctx)
135 {
136 struct btrfs_fs_info *fs_info = root->fs_info;
137 int ret = 0;
138
139 mutex_lock(&root->log_mutex);
140
141 if (root->log_root) {
142 if (btrfs_need_log_full_commit(fs_info, trans)) {
143 ret = -EAGAIN;
144 goto out;
145 }
146
147 if (!root->log_start_pid) {
148 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
149 root->log_start_pid = current->pid;
150 } else if (root->log_start_pid != current->pid) {
151 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
152 }
153 } else {
154 mutex_lock(&fs_info->tree_log_mutex);
155 if (!fs_info->log_root_tree)
156 ret = btrfs_init_log_root_tree(trans, fs_info);
157 mutex_unlock(&fs_info->tree_log_mutex);
158 if (ret)
159 goto out;
160
161 ret = btrfs_add_log_tree(trans, root);
162 if (ret)
163 goto out;
164
165 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
166 root->log_start_pid = current->pid;
167 }
168
169 atomic_inc(&root->log_batch);
170 atomic_inc(&root->log_writers);
171 if (ctx) {
172 int index = root->log_transid % 2;
173 list_add_tail(&ctx->list, &root->log_ctxs[index]);
174 ctx->log_transid = root->log_transid;
175 }
176
177 out:
178 mutex_unlock(&root->log_mutex);
179 return ret;
180 }
181
182 /*
183 * returns 0 if there was a log transaction running and we were able
184 * to join, or returns -ENOENT if there were not transactions
185 * in progress
186 */
187 static int join_running_log_trans(struct btrfs_root *root)
188 {
189 int ret = -ENOENT;
190
191 smp_mb();
192 if (!root->log_root)
193 return -ENOENT;
194
195 mutex_lock(&root->log_mutex);
196 if (root->log_root) {
197 ret = 0;
198 atomic_inc(&root->log_writers);
199 }
200 mutex_unlock(&root->log_mutex);
201 return ret;
202 }
203
204 /*
205 * This either makes the current running log transaction wait
206 * until you call btrfs_end_log_trans() or it makes any future
207 * log transactions wait until you call btrfs_end_log_trans()
208 */
209 void btrfs_pin_log_trans(struct btrfs_root *root)
210 {
211 mutex_lock(&root->log_mutex);
212 atomic_inc(&root->log_writers);
213 mutex_unlock(&root->log_mutex);
214 }
215
216 /*
217 * indicate we're done making changes to the log tree
218 * and wake up anyone waiting to do a sync
219 */
220 void btrfs_end_log_trans(struct btrfs_root *root)
221 {
222 if (atomic_dec_and_test(&root->log_writers)) {
223 /* atomic_dec_and_test implies a barrier */
224 cond_wake_up_nomb(&root->log_writer_wait);
225 }
226 }
227
228
229 /*
230 * the walk control struct is used to pass state down the chain when
231 * processing the log tree. The stage field tells us which part
232 * of the log tree processing we are currently doing. The others
233 * are state fields used for that specific part
234 */
235 struct walk_control {
236 /* should we free the extent on disk when done? This is used
237 * at transaction commit time while freeing a log tree
238 */
239 int free;
240
241 /* should we write out the extent buffer? This is used
242 * while flushing the log tree to disk during a sync
243 */
244 int write;
245
246 /* should we wait for the extent buffer io to finish? Also used
247 * while flushing the log tree to disk for a sync
248 */
249 int wait;
250
251 /* pin only walk, we record which extents on disk belong to the
252 * log trees
253 */
254 int pin;
255
256 /* what stage of the replay code we're currently in */
257 int stage;
258
259 /*
260 * Ignore any items from the inode currently being processed. Needs
261 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
262 * the LOG_WALK_REPLAY_INODES stage.
263 */
264 bool ignore_cur_inode;
265
266 /* the root we are currently replaying */
267 struct btrfs_root *replay_dest;
268
269 /* the trans handle for the current replay */
270 struct btrfs_trans_handle *trans;
271
272 /* the function that gets used to process blocks we find in the
273 * tree. Note the extent_buffer might not be up to date when it is
274 * passed in, and it must be checked or read if you need the data
275 * inside it
276 */
277 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
278 struct walk_control *wc, u64 gen, int level);
279 };
280
281 /*
282 * process_func used to pin down extents, write them or wait on them
283 */
284 static int process_one_buffer(struct btrfs_root *log,
285 struct extent_buffer *eb,
286 struct walk_control *wc, u64 gen, int level)
287 {
288 struct btrfs_fs_info *fs_info = log->fs_info;
289 int ret = 0;
290
291 /*
292 * If this fs is mixed then we need to be able to process the leaves to
293 * pin down any logged extents, so we have to read the block.
294 */
295 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
296 ret = btrfs_read_buffer(eb, gen, level, NULL);
297 if (ret)
298 return ret;
299 }
300
301 if (wc->pin)
302 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
303 eb->len);
304
305 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
306 if (wc->pin && btrfs_header_level(eb) == 0)
307 ret = btrfs_exclude_logged_extents(fs_info, eb);
308 if (wc->write)
309 btrfs_write_tree_block(eb);
310 if (wc->wait)
311 btrfs_wait_tree_block_writeback(eb);
312 }
313 return ret;
314 }
315
316 /*
317 * Item overwrite used by replay and tree logging. eb, slot and key all refer
318 * to the src data we are copying out.
319 *
320 * root is the tree we are copying into, and path is a scratch
321 * path for use in this function (it should be released on entry and
322 * will be released on exit).
323 *
324 * If the key is already in the destination tree the existing item is
325 * overwritten. If the existing item isn't big enough, it is extended.
326 * If it is too large, it is truncated.
327 *
328 * If the key isn't in the destination yet, a new item is inserted.
329 */
330 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
331 struct btrfs_root *root,
332 struct btrfs_path *path,
333 struct extent_buffer *eb, int slot,
334 struct btrfs_key *key)
335 {
336 struct btrfs_fs_info *fs_info = root->fs_info;
337 int ret;
338 u32 item_size;
339 u64 saved_i_size = 0;
340 int save_old_i_size = 0;
341 unsigned long src_ptr;
342 unsigned long dst_ptr;
343 int overwrite_root = 0;
344 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
345
346 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
347 overwrite_root = 1;
348
349 item_size = btrfs_item_size_nr(eb, slot);
350 src_ptr = btrfs_item_ptr_offset(eb, slot);
351
352 /* look for the key in the destination tree */
353 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
354 if (ret < 0)
355 return ret;
356
357 if (ret == 0) {
358 char *src_copy;
359 char *dst_copy;
360 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
361 path->slots[0]);
362 if (dst_size != item_size)
363 goto insert;
364
365 if (item_size == 0) {
366 btrfs_release_path(path);
367 return 0;
368 }
369 dst_copy = kmalloc(item_size, GFP_NOFS);
370 src_copy = kmalloc(item_size, GFP_NOFS);
371 if (!dst_copy || !src_copy) {
372 btrfs_release_path(path);
373 kfree(dst_copy);
374 kfree(src_copy);
375 return -ENOMEM;
376 }
377
378 read_extent_buffer(eb, src_copy, src_ptr, item_size);
379
380 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
381 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
382 item_size);
383 ret = memcmp(dst_copy, src_copy, item_size);
384
385 kfree(dst_copy);
386 kfree(src_copy);
387 /*
388 * they have the same contents, just return, this saves
389 * us from cowing blocks in the destination tree and doing
390 * extra writes that may not have been done by a previous
391 * sync
392 */
393 if (ret == 0) {
394 btrfs_release_path(path);
395 return 0;
396 }
397
398 /*
399 * We need to load the old nbytes into the inode so when we
400 * replay the extents we've logged we get the right nbytes.
401 */
402 if (inode_item) {
403 struct btrfs_inode_item *item;
404 u64 nbytes;
405 u32 mode;
406
407 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
408 struct btrfs_inode_item);
409 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
410 item = btrfs_item_ptr(eb, slot,
411 struct btrfs_inode_item);
412 btrfs_set_inode_nbytes(eb, item, nbytes);
413
414 /*
415 * If this is a directory we need to reset the i_size to
416 * 0 so that we can set it up properly when replaying
417 * the rest of the items in this log.
418 */
419 mode = btrfs_inode_mode(eb, item);
420 if (S_ISDIR(mode))
421 btrfs_set_inode_size(eb, item, 0);
422 }
423 } else if (inode_item) {
424 struct btrfs_inode_item *item;
425 u32 mode;
426
427 /*
428 * New inode, set nbytes to 0 so that the nbytes comes out
429 * properly when we replay the extents.
430 */
431 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
432 btrfs_set_inode_nbytes(eb, item, 0);
433
434 /*
435 * If this is a directory we need to reset the i_size to 0 so
436 * that we can set it up properly when replaying the rest of
437 * the items in this log.
438 */
439 mode = btrfs_inode_mode(eb, item);
440 if (S_ISDIR(mode))
441 btrfs_set_inode_size(eb, item, 0);
442 }
443 insert:
444 btrfs_release_path(path);
445 /* try to insert the key into the destination tree */
446 path->skip_release_on_error = 1;
447 ret = btrfs_insert_empty_item(trans, root, path,
448 key, item_size);
449 path->skip_release_on_error = 0;
450
451 /* make sure any existing item is the correct size */
452 if (ret == -EEXIST || ret == -EOVERFLOW) {
453 u32 found_size;
454 found_size = btrfs_item_size_nr(path->nodes[0],
455 path->slots[0]);
456 if (found_size > item_size)
457 btrfs_truncate_item(fs_info, path, item_size, 1);
458 else if (found_size < item_size)
459 btrfs_extend_item(fs_info, path,
460 item_size - found_size);
461 } else if (ret) {
462 return ret;
463 }
464 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
465 path->slots[0]);
466
467 /* don't overwrite an existing inode if the generation number
468 * was logged as zero. This is done when the tree logging code
469 * is just logging an inode to make sure it exists after recovery.
470 *
471 * Also, don't overwrite i_size on directories during replay.
472 * log replay inserts and removes directory items based on the
473 * state of the tree found in the subvolume, and i_size is modified
474 * as it goes
475 */
476 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
477 struct btrfs_inode_item *src_item;
478 struct btrfs_inode_item *dst_item;
479
480 src_item = (struct btrfs_inode_item *)src_ptr;
481 dst_item = (struct btrfs_inode_item *)dst_ptr;
482
483 if (btrfs_inode_generation(eb, src_item) == 0) {
484 struct extent_buffer *dst_eb = path->nodes[0];
485 const u64 ino_size = btrfs_inode_size(eb, src_item);
486
487 /*
488 * For regular files an ino_size == 0 is used only when
489 * logging that an inode exists, as part of a directory
490 * fsync, and the inode wasn't fsynced before. In this
491 * case don't set the size of the inode in the fs/subvol
492 * tree, otherwise we would be throwing valid data away.
493 */
494 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
495 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
496 ino_size != 0) {
497 struct btrfs_map_token token;
498
499 btrfs_init_map_token(&token);
500 btrfs_set_token_inode_size(dst_eb, dst_item,
501 ino_size, &token);
502 }
503 goto no_copy;
504 }
505
506 if (overwrite_root &&
507 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
508 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
509 save_old_i_size = 1;
510 saved_i_size = btrfs_inode_size(path->nodes[0],
511 dst_item);
512 }
513 }
514
515 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
516 src_ptr, item_size);
517
518 if (save_old_i_size) {
519 struct btrfs_inode_item *dst_item;
520 dst_item = (struct btrfs_inode_item *)dst_ptr;
521 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
522 }
523
524 /* make sure the generation is filled in */
525 if (key->type == BTRFS_INODE_ITEM_KEY) {
526 struct btrfs_inode_item *dst_item;
527 dst_item = (struct btrfs_inode_item *)dst_ptr;
528 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
529 btrfs_set_inode_generation(path->nodes[0], dst_item,
530 trans->transid);
531 }
532 }
533 no_copy:
534 btrfs_mark_buffer_dirty(path->nodes[0]);
535 btrfs_release_path(path);
536 return 0;
537 }
538
539 /*
540 * simple helper to read an inode off the disk from a given root
541 * This can only be called for subvolume roots and not for the log
542 */
543 static noinline struct inode *read_one_inode(struct btrfs_root *root,
544 u64 objectid)
545 {
546 struct btrfs_key key;
547 struct inode *inode;
548
549 key.objectid = objectid;
550 key.type = BTRFS_INODE_ITEM_KEY;
551 key.offset = 0;
552 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
553 if (IS_ERR(inode))
554 inode = NULL;
555 return inode;
556 }
557
558 /* replays a single extent in 'eb' at 'slot' with 'key' into the
559 * subvolume 'root'. path is released on entry and should be released
560 * on exit.
561 *
562 * extents in the log tree have not been allocated out of the extent
563 * tree yet. So, this completes the allocation, taking a reference
564 * as required if the extent already exists or creating a new extent
565 * if it isn't in the extent allocation tree yet.
566 *
567 * The extent is inserted into the file, dropping any existing extents
568 * from the file that overlap the new one.
569 */
570 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
571 struct btrfs_root *root,
572 struct btrfs_path *path,
573 struct extent_buffer *eb, int slot,
574 struct btrfs_key *key)
575 {
576 struct btrfs_fs_info *fs_info = root->fs_info;
577 int found_type;
578 u64 extent_end;
579 u64 start = key->offset;
580 u64 nbytes = 0;
581 struct btrfs_file_extent_item *item;
582 struct inode *inode = NULL;
583 unsigned long size;
584 int ret = 0;
585
586 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
587 found_type = btrfs_file_extent_type(eb, item);
588
589 if (found_type == BTRFS_FILE_EXTENT_REG ||
590 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
591 nbytes = btrfs_file_extent_num_bytes(eb, item);
592 extent_end = start + nbytes;
593
594 /*
595 * We don't add to the inodes nbytes if we are prealloc or a
596 * hole.
597 */
598 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
599 nbytes = 0;
600 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
601 size = btrfs_file_extent_ram_bytes(eb, item);
602 nbytes = btrfs_file_extent_ram_bytes(eb, item);
603 extent_end = ALIGN(start + size,
604 fs_info->sectorsize);
605 } else {
606 ret = 0;
607 goto out;
608 }
609
610 inode = read_one_inode(root, key->objectid);
611 if (!inode) {
612 ret = -EIO;
613 goto out;
614 }
615
616 /*
617 * first check to see if we already have this extent in the
618 * file. This must be done before the btrfs_drop_extents run
619 * so we don't try to drop this extent.
620 */
621 ret = btrfs_lookup_file_extent(trans, root, path,
622 btrfs_ino(BTRFS_I(inode)), start, 0);
623
624 if (ret == 0 &&
625 (found_type == BTRFS_FILE_EXTENT_REG ||
626 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
627 struct btrfs_file_extent_item cmp1;
628 struct btrfs_file_extent_item cmp2;
629 struct btrfs_file_extent_item *existing;
630 struct extent_buffer *leaf;
631
632 leaf = path->nodes[0];
633 existing = btrfs_item_ptr(leaf, path->slots[0],
634 struct btrfs_file_extent_item);
635
636 read_extent_buffer(eb, &cmp1, (unsigned long)item,
637 sizeof(cmp1));
638 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
639 sizeof(cmp2));
640
641 /*
642 * we already have a pointer to this exact extent,
643 * we don't have to do anything
644 */
645 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
646 btrfs_release_path(path);
647 goto out;
648 }
649 }
650 btrfs_release_path(path);
651
652 /* drop any overlapping extents */
653 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
654 if (ret)
655 goto out;
656
657 if (found_type == BTRFS_FILE_EXTENT_REG ||
658 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
659 u64 offset;
660 unsigned long dest_offset;
661 struct btrfs_key ins;
662
663 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
664 btrfs_fs_incompat(fs_info, NO_HOLES))
665 goto update_inode;
666
667 ret = btrfs_insert_empty_item(trans, root, path, key,
668 sizeof(*item));
669 if (ret)
670 goto out;
671 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
672 path->slots[0]);
673 copy_extent_buffer(path->nodes[0], eb, dest_offset,
674 (unsigned long)item, sizeof(*item));
675
676 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
677 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
678 ins.type = BTRFS_EXTENT_ITEM_KEY;
679 offset = key->offset - btrfs_file_extent_offset(eb, item);
680
681 /*
682 * Manually record dirty extent, as here we did a shallow
683 * file extent item copy and skip normal backref update,
684 * but modifying extent tree all by ourselves.
685 * So need to manually record dirty extent for qgroup,
686 * as the owner of the file extent changed from log tree
687 * (doesn't affect qgroup) to fs/file tree(affects qgroup)
688 */
689 ret = btrfs_qgroup_trace_extent(trans,
690 btrfs_file_extent_disk_bytenr(eb, item),
691 btrfs_file_extent_disk_num_bytes(eb, item),
692 GFP_NOFS);
693 if (ret < 0)
694 goto out;
695
696 if (ins.objectid > 0) {
697 u64 csum_start;
698 u64 csum_end;
699 LIST_HEAD(ordered_sums);
700 /*
701 * is this extent already allocated in the extent
702 * allocation tree? If so, just add a reference
703 */
704 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
705 ins.offset);
706 if (ret == 0) {
707 ret = btrfs_inc_extent_ref(trans, root,
708 ins.objectid, ins.offset,
709 0, root->root_key.objectid,
710 key->objectid, offset);
711 if (ret)
712 goto out;
713 } else {
714 /*
715 * insert the extent pointer in the extent
716 * allocation tree
717 */
718 ret = btrfs_alloc_logged_file_extent(trans,
719 root->root_key.objectid,
720 key->objectid, offset, &ins);
721 if (ret)
722 goto out;
723 }
724 btrfs_release_path(path);
725
726 if (btrfs_file_extent_compression(eb, item)) {
727 csum_start = ins.objectid;
728 csum_end = csum_start + ins.offset;
729 } else {
730 csum_start = ins.objectid +
731 btrfs_file_extent_offset(eb, item);
732 csum_end = csum_start +
733 btrfs_file_extent_num_bytes(eb, item);
734 }
735
736 ret = btrfs_lookup_csums_range(root->log_root,
737 csum_start, csum_end - 1,
738 &ordered_sums, 0);
739 if (ret)
740 goto out;
741 /*
742 * Now delete all existing cums in the csum root that
743 * cover our range. We do this because we can have an
744 * extent that is completely referenced by one file
745 * extent item and partially referenced by another
746 * file extent item (like after using the clone or
747 * extent_same ioctls). In this case if we end up doing
748 * the replay of the one that partially references the
749 * extent first, and we do not do the csum deletion
750 * below, we can get 2 csum items in the csum tree that
751 * overlap each other. For example, imagine our log has
752 * the two following file extent items:
753 *
754 * key (257 EXTENT_DATA 409600)
755 * extent data disk byte 12845056 nr 102400
756 * extent data offset 20480 nr 20480 ram 102400
757 *
758 * key (257 EXTENT_DATA 819200)
759 * extent data disk byte 12845056 nr 102400
760 * extent data offset 0 nr 102400 ram 102400
761 *
762 * Where the second one fully references the 100K extent
763 * that starts at disk byte 12845056, and the log tree
764 * has a single csum item that covers the entire range
765 * of the extent:
766 *
767 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
768 *
769 * After the first file extent item is replayed, the
770 * csum tree gets the following csum item:
771 *
772 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
773 *
774 * Which covers the 20K sub-range starting at offset 20K
775 * of our extent. Now when we replay the second file
776 * extent item, if we do not delete existing csum items
777 * that cover any of its blocks, we end up getting two
778 * csum items in our csum tree that overlap each other:
779 *
780 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
781 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
782 *
783 * Which is a problem, because after this anyone trying
784 * to lookup up for the checksum of any block of our
785 * extent starting at an offset of 40K or higher, will
786 * end up looking at the second csum item only, which
787 * does not contain the checksum for any block starting
788 * at offset 40K or higher of our extent.
789 */
790 while (!list_empty(&ordered_sums)) {
791 struct btrfs_ordered_sum *sums;
792 sums = list_entry(ordered_sums.next,
793 struct btrfs_ordered_sum,
794 list);
795 if (!ret)
796 ret = btrfs_del_csums(trans, fs_info,
797 sums->bytenr,
798 sums->len);
799 if (!ret)
800 ret = btrfs_csum_file_blocks(trans,
801 fs_info->csum_root, sums);
802 list_del(&sums->list);
803 kfree(sums);
804 }
805 if (ret)
806 goto out;
807 } else {
808 btrfs_release_path(path);
809 }
810 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
811 /* inline extents are easy, we just overwrite them */
812 ret = overwrite_item(trans, root, path, eb, slot, key);
813 if (ret)
814 goto out;
815 }
816
817 inode_add_bytes(inode, nbytes);
818 update_inode:
819 ret = btrfs_update_inode(trans, root, inode);
820 out:
821 if (inode)
822 iput(inode);
823 return ret;
824 }
825
826 /*
827 * when cleaning up conflicts between the directory names in the
828 * subvolume, directory names in the log and directory names in the
829 * inode back references, we may have to unlink inodes from directories.
830 *
831 * This is a helper function to do the unlink of a specific directory
832 * item
833 */
834 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
835 struct btrfs_root *root,
836 struct btrfs_path *path,
837 struct btrfs_inode *dir,
838 struct btrfs_dir_item *di)
839 {
840 struct inode *inode;
841 char *name;
842 int name_len;
843 struct extent_buffer *leaf;
844 struct btrfs_key location;
845 int ret;
846
847 leaf = path->nodes[0];
848
849 btrfs_dir_item_key_to_cpu(leaf, di, &location);
850 name_len = btrfs_dir_name_len(leaf, di);
851 name = kmalloc(name_len, GFP_NOFS);
852 if (!name)
853 return -ENOMEM;
854
855 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
856 btrfs_release_path(path);
857
858 inode = read_one_inode(root, location.objectid);
859 if (!inode) {
860 ret = -EIO;
861 goto out;
862 }
863
864 ret = link_to_fixup_dir(trans, root, path, location.objectid);
865 if (ret)
866 goto out;
867
868 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
869 name_len);
870 if (ret)
871 goto out;
872 else
873 ret = btrfs_run_delayed_items(trans);
874 out:
875 kfree(name);
876 iput(inode);
877 return ret;
878 }
879
880 /*
881 * helper function to see if a given name and sequence number found
882 * in an inode back reference are already in a directory and correctly
883 * point to this inode
884 */
885 static noinline int inode_in_dir(struct btrfs_root *root,
886 struct btrfs_path *path,
887 u64 dirid, u64 objectid, u64 index,
888 const char *name, int name_len)
889 {
890 struct btrfs_dir_item *di;
891 struct btrfs_key location;
892 int match = 0;
893
894 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
895 index, name, name_len, 0);
896 if (di && !IS_ERR(di)) {
897 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
898 if (location.objectid != objectid)
899 goto out;
900 } else
901 goto out;
902 btrfs_release_path(path);
903
904 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
905 if (di && !IS_ERR(di)) {
906 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
907 if (location.objectid != objectid)
908 goto out;
909 } else
910 goto out;
911 match = 1;
912 out:
913 btrfs_release_path(path);
914 return match;
915 }
916
917 /*
918 * helper function to check a log tree for a named back reference in
919 * an inode. This is used to decide if a back reference that is
920 * found in the subvolume conflicts with what we find in the log.
921 *
922 * inode backreferences may have multiple refs in a single item,
923 * during replay we process one reference at a time, and we don't
924 * want to delete valid links to a file from the subvolume if that
925 * link is also in the log.
926 */
927 static noinline int backref_in_log(struct btrfs_root *log,
928 struct btrfs_key *key,
929 u64 ref_objectid,
930 const char *name, int namelen)
931 {
932 struct btrfs_path *path;
933 struct btrfs_inode_ref *ref;
934 unsigned long ptr;
935 unsigned long ptr_end;
936 unsigned long name_ptr;
937 int found_name_len;
938 int item_size;
939 int ret;
940 int match = 0;
941
942 path = btrfs_alloc_path();
943 if (!path)
944 return -ENOMEM;
945
946 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
947 if (ret != 0)
948 goto out;
949
950 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
951
952 if (key->type == BTRFS_INODE_EXTREF_KEY) {
953 if (btrfs_find_name_in_ext_backref(path->nodes[0],
954 path->slots[0],
955 ref_objectid,
956 name, namelen, NULL))
957 match = 1;
958
959 goto out;
960 }
961
962 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
963 ptr_end = ptr + item_size;
964 while (ptr < ptr_end) {
965 ref = (struct btrfs_inode_ref *)ptr;
966 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
967 if (found_name_len == namelen) {
968 name_ptr = (unsigned long)(ref + 1);
969 ret = memcmp_extent_buffer(path->nodes[0], name,
970 name_ptr, namelen);
971 if (ret == 0) {
972 match = 1;
973 goto out;
974 }
975 }
976 ptr = (unsigned long)(ref + 1) + found_name_len;
977 }
978 out:
979 btrfs_free_path(path);
980 return match;
981 }
982
983 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
984 struct btrfs_root *root,
985 struct btrfs_path *path,
986 struct btrfs_root *log_root,
987 struct btrfs_inode *dir,
988 struct btrfs_inode *inode,
989 u64 inode_objectid, u64 parent_objectid,
990 u64 ref_index, char *name, int namelen,
991 int *search_done)
992 {
993 int ret;
994 char *victim_name;
995 int victim_name_len;
996 struct extent_buffer *leaf;
997 struct btrfs_dir_item *di;
998 struct btrfs_key search_key;
999 struct btrfs_inode_extref *extref;
1000
1001 again:
1002 /* Search old style refs */
1003 search_key.objectid = inode_objectid;
1004 search_key.type = BTRFS_INODE_REF_KEY;
1005 search_key.offset = parent_objectid;
1006 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1007 if (ret == 0) {
1008 struct btrfs_inode_ref *victim_ref;
1009 unsigned long ptr;
1010 unsigned long ptr_end;
1011
1012 leaf = path->nodes[0];
1013
1014 /* are we trying to overwrite a back ref for the root directory
1015 * if so, just jump out, we're done
1016 */
1017 if (search_key.objectid == search_key.offset)
1018 return 1;
1019
1020 /* check all the names in this back reference to see
1021 * if they are in the log. if so, we allow them to stay
1022 * otherwise they must be unlinked as a conflict
1023 */
1024 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1025 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1026 while (ptr < ptr_end) {
1027 victim_ref = (struct btrfs_inode_ref *)ptr;
1028 victim_name_len = btrfs_inode_ref_name_len(leaf,
1029 victim_ref);
1030 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1031 if (!victim_name)
1032 return -ENOMEM;
1033
1034 read_extent_buffer(leaf, victim_name,
1035 (unsigned long)(victim_ref + 1),
1036 victim_name_len);
1037
1038 if (!backref_in_log(log_root, &search_key,
1039 parent_objectid,
1040 victim_name,
1041 victim_name_len)) {
1042 inc_nlink(&inode->vfs_inode);
1043 btrfs_release_path(path);
1044
1045 ret = btrfs_unlink_inode(trans, root, dir, inode,
1046 victim_name, victim_name_len);
1047 kfree(victim_name);
1048 if (ret)
1049 return ret;
1050 ret = btrfs_run_delayed_items(trans);
1051 if (ret)
1052 return ret;
1053 *search_done = 1;
1054 goto again;
1055 }
1056 kfree(victim_name);
1057
1058 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1059 }
1060
1061 /*
1062 * NOTE: we have searched root tree and checked the
1063 * corresponding ref, it does not need to check again.
1064 */
1065 *search_done = 1;
1066 }
1067 btrfs_release_path(path);
1068
1069 /* Same search but for extended refs */
1070 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1071 inode_objectid, parent_objectid, 0,
1072 0);
1073 if (!IS_ERR_OR_NULL(extref)) {
1074 u32 item_size;
1075 u32 cur_offset = 0;
1076 unsigned long base;
1077 struct inode *victim_parent;
1078
1079 leaf = path->nodes[0];
1080
1081 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1082 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1083
1084 while (cur_offset < item_size) {
1085 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1086
1087 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1088
1089 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1090 goto next;
1091
1092 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1093 if (!victim_name)
1094 return -ENOMEM;
1095 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1096 victim_name_len);
1097
1098 search_key.objectid = inode_objectid;
1099 search_key.type = BTRFS_INODE_EXTREF_KEY;
1100 search_key.offset = btrfs_extref_hash(parent_objectid,
1101 victim_name,
1102 victim_name_len);
1103 ret = 0;
1104 if (!backref_in_log(log_root, &search_key,
1105 parent_objectid, victim_name,
1106 victim_name_len)) {
1107 ret = -ENOENT;
1108 victim_parent = read_one_inode(root,
1109 parent_objectid);
1110 if (victim_parent) {
1111 inc_nlink(&inode->vfs_inode);
1112 btrfs_release_path(path);
1113
1114 ret = btrfs_unlink_inode(trans, root,
1115 BTRFS_I(victim_parent),
1116 inode,
1117 victim_name,
1118 victim_name_len);
1119 if (!ret)
1120 ret = btrfs_run_delayed_items(
1121 trans);
1122 }
1123 iput(victim_parent);
1124 kfree(victim_name);
1125 if (ret)
1126 return ret;
1127 *search_done = 1;
1128 goto again;
1129 }
1130 kfree(victim_name);
1131 next:
1132 cur_offset += victim_name_len + sizeof(*extref);
1133 }
1134 *search_done = 1;
1135 }
1136 btrfs_release_path(path);
1137
1138 /* look for a conflicting sequence number */
1139 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1140 ref_index, name, namelen, 0);
1141 if (di && !IS_ERR(di)) {
1142 ret = drop_one_dir_item(trans, root, path, dir, di);
1143 if (ret)
1144 return ret;
1145 }
1146 btrfs_release_path(path);
1147
1148 /* look for a conflicting name */
1149 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1150 name, namelen, 0);
1151 if (di && !IS_ERR(di)) {
1152 ret = drop_one_dir_item(trans, root, path, dir, di);
1153 if (ret)
1154 return ret;
1155 }
1156 btrfs_release_path(path);
1157
1158 return 0;
1159 }
1160
1161 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1162 u32 *namelen, char **name, u64 *index,
1163 u64 *parent_objectid)
1164 {
1165 struct btrfs_inode_extref *extref;
1166
1167 extref = (struct btrfs_inode_extref *)ref_ptr;
1168
1169 *namelen = btrfs_inode_extref_name_len(eb, extref);
1170 *name = kmalloc(*namelen, GFP_NOFS);
1171 if (*name == NULL)
1172 return -ENOMEM;
1173
1174 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1175 *namelen);
1176
1177 if (index)
1178 *index = btrfs_inode_extref_index(eb, extref);
1179 if (parent_objectid)
1180 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1181
1182 return 0;
1183 }
1184
1185 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1186 u32 *namelen, char **name, u64 *index)
1187 {
1188 struct btrfs_inode_ref *ref;
1189
1190 ref = (struct btrfs_inode_ref *)ref_ptr;
1191
1192 *namelen = btrfs_inode_ref_name_len(eb, ref);
1193 *name = kmalloc(*namelen, GFP_NOFS);
1194 if (*name == NULL)
1195 return -ENOMEM;
1196
1197 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1198
1199 if (index)
1200 *index = btrfs_inode_ref_index(eb, ref);
1201
1202 return 0;
1203 }
1204
1205 /*
1206 * Take an inode reference item from the log tree and iterate all names from the
1207 * inode reference item in the subvolume tree with the same key (if it exists).
1208 * For any name that is not in the inode reference item from the log tree, do a
1209 * proper unlink of that name (that is, remove its entry from the inode
1210 * reference item and both dir index keys).
1211 */
1212 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1213 struct btrfs_root *root,
1214 struct btrfs_path *path,
1215 struct btrfs_inode *inode,
1216 struct extent_buffer *log_eb,
1217 int log_slot,
1218 struct btrfs_key *key)
1219 {
1220 int ret;
1221 unsigned long ref_ptr;
1222 unsigned long ref_end;
1223 struct extent_buffer *eb;
1224
1225 again:
1226 btrfs_release_path(path);
1227 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1228 if (ret > 0) {
1229 ret = 0;
1230 goto out;
1231 }
1232 if (ret < 0)
1233 goto out;
1234
1235 eb = path->nodes[0];
1236 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1237 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1238 while (ref_ptr < ref_end) {
1239 char *name = NULL;
1240 int namelen;
1241 u64 parent_id;
1242
1243 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1244 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1245 NULL, &parent_id);
1246 } else {
1247 parent_id = key->offset;
1248 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1249 NULL);
1250 }
1251 if (ret)
1252 goto out;
1253
1254 if (key->type == BTRFS_INODE_EXTREF_KEY)
1255 ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
1256 parent_id, name,
1257 namelen, NULL);
1258 else
1259 ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
1260 namelen, NULL);
1261
1262 if (!ret) {
1263 struct inode *dir;
1264
1265 btrfs_release_path(path);
1266 dir = read_one_inode(root, parent_id);
1267 if (!dir) {
1268 ret = -ENOENT;
1269 kfree(name);
1270 goto out;
1271 }
1272 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1273 inode, name, namelen);
1274 kfree(name);
1275 iput(dir);
1276 if (ret)
1277 goto out;
1278 goto again;
1279 }
1280
1281 kfree(name);
1282 ref_ptr += namelen;
1283 if (key->type == BTRFS_INODE_EXTREF_KEY)
1284 ref_ptr += sizeof(struct btrfs_inode_extref);
1285 else
1286 ref_ptr += sizeof(struct btrfs_inode_ref);
1287 }
1288 ret = 0;
1289 out:
1290 btrfs_release_path(path);
1291 return ret;
1292 }
1293
1294 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1295 const u8 ref_type, const char *name,
1296 const int namelen)
1297 {
1298 struct btrfs_key key;
1299 struct btrfs_path *path;
1300 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1301 int ret;
1302
1303 path = btrfs_alloc_path();
1304 if (!path)
1305 return -ENOMEM;
1306
1307 key.objectid = btrfs_ino(BTRFS_I(inode));
1308 key.type = ref_type;
1309 if (key.type == BTRFS_INODE_REF_KEY)
1310 key.offset = parent_id;
1311 else
1312 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1313
1314 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1315 if (ret < 0)
1316 goto out;
1317 if (ret > 0) {
1318 ret = 0;
1319 goto out;
1320 }
1321 if (key.type == BTRFS_INODE_EXTREF_KEY)
1322 ret = btrfs_find_name_in_ext_backref(path->nodes[0],
1323 path->slots[0], parent_id,
1324 name, namelen, NULL);
1325 else
1326 ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1327 name, namelen, NULL);
1328
1329 out:
1330 btrfs_free_path(path);
1331 return ret;
1332 }
1333
1334 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1335 struct inode *dir, struct inode *inode, const char *name,
1336 int namelen, u64 ref_index)
1337 {
1338 struct btrfs_dir_item *dir_item;
1339 struct btrfs_key key;
1340 struct btrfs_path *path;
1341 struct inode *other_inode = NULL;
1342 int ret;
1343
1344 path = btrfs_alloc_path();
1345 if (!path)
1346 return -ENOMEM;
1347
1348 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1349 btrfs_ino(BTRFS_I(dir)),
1350 name, namelen, 0);
1351 if (!dir_item) {
1352 btrfs_release_path(path);
1353 goto add_link;
1354 } else if (IS_ERR(dir_item)) {
1355 ret = PTR_ERR(dir_item);
1356 goto out;
1357 }
1358
1359 /*
1360 * Our inode's dentry collides with the dentry of another inode which is
1361 * in the log but not yet processed since it has a higher inode number.
1362 * So delete that other dentry.
1363 */
1364 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1365 btrfs_release_path(path);
1366 other_inode = read_one_inode(root, key.objectid);
1367 if (!other_inode) {
1368 ret = -ENOENT;
1369 goto out;
1370 }
1371 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1372 name, namelen);
1373 if (ret)
1374 goto out;
1375 /*
1376 * If we dropped the link count to 0, bump it so that later the iput()
1377 * on the inode will not free it. We will fixup the link count later.
1378 */
1379 if (other_inode->i_nlink == 0)
1380 inc_nlink(other_inode);
1381
1382 ret = btrfs_run_delayed_items(trans);
1383 if (ret)
1384 goto out;
1385 add_link:
1386 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1387 name, namelen, 0, ref_index);
1388 out:
1389 iput(other_inode);
1390 btrfs_free_path(path);
1391
1392 return ret;
1393 }
1394
1395 /*
1396 * replay one inode back reference item found in the log tree.
1397 * eb, slot and key refer to the buffer and key found in the log tree.
1398 * root is the destination we are replaying into, and path is for temp
1399 * use by this function. (it should be released on return).
1400 */
1401 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1402 struct btrfs_root *root,
1403 struct btrfs_root *log,
1404 struct btrfs_path *path,
1405 struct extent_buffer *eb, int slot,
1406 struct btrfs_key *key)
1407 {
1408 struct inode *dir = NULL;
1409 struct inode *inode = NULL;
1410 unsigned long ref_ptr;
1411 unsigned long ref_end;
1412 char *name = NULL;
1413 int namelen;
1414 int ret;
1415 int search_done = 0;
1416 int log_ref_ver = 0;
1417 u64 parent_objectid;
1418 u64 inode_objectid;
1419 u64 ref_index = 0;
1420 int ref_struct_size;
1421
1422 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1423 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1424
1425 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1426 struct btrfs_inode_extref *r;
1427
1428 ref_struct_size = sizeof(struct btrfs_inode_extref);
1429 log_ref_ver = 1;
1430 r = (struct btrfs_inode_extref *)ref_ptr;
1431 parent_objectid = btrfs_inode_extref_parent(eb, r);
1432 } else {
1433 ref_struct_size = sizeof(struct btrfs_inode_ref);
1434 parent_objectid = key->offset;
1435 }
1436 inode_objectid = key->objectid;
1437
1438 /*
1439 * it is possible that we didn't log all the parent directories
1440 * for a given inode. If we don't find the dir, just don't
1441 * copy the back ref in. The link count fixup code will take
1442 * care of the rest
1443 */
1444 dir = read_one_inode(root, parent_objectid);
1445 if (!dir) {
1446 ret = -ENOENT;
1447 goto out;
1448 }
1449
1450 inode = read_one_inode(root, inode_objectid);
1451 if (!inode) {
1452 ret = -EIO;
1453 goto out;
1454 }
1455
1456 while (ref_ptr < ref_end) {
1457 if (log_ref_ver) {
1458 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1459 &ref_index, &parent_objectid);
1460 /*
1461 * parent object can change from one array
1462 * item to another.
1463 */
1464 if (!dir)
1465 dir = read_one_inode(root, parent_objectid);
1466 if (!dir) {
1467 ret = -ENOENT;
1468 goto out;
1469 }
1470 } else {
1471 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1472 &ref_index);
1473 }
1474 if (ret)
1475 goto out;
1476
1477 /* if we already have a perfect match, we're done */
1478 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1479 btrfs_ino(BTRFS_I(inode)), ref_index,
1480 name, namelen)) {
1481 /*
1482 * look for a conflicting back reference in the
1483 * metadata. if we find one we have to unlink that name
1484 * of the file before we add our new link. Later on, we
1485 * overwrite any existing back reference, and we don't
1486 * want to create dangling pointers in the directory.
1487 */
1488
1489 if (!search_done) {
1490 ret = __add_inode_ref(trans, root, path, log,
1491 BTRFS_I(dir),
1492 BTRFS_I(inode),
1493 inode_objectid,
1494 parent_objectid,
1495 ref_index, name, namelen,
1496 &search_done);
1497 if (ret) {
1498 if (ret == 1)
1499 ret = 0;
1500 goto out;
1501 }
1502 }
1503
1504 /*
1505 * If a reference item already exists for this inode
1506 * with the same parent and name, but different index,
1507 * drop it and the corresponding directory index entries
1508 * from the parent before adding the new reference item
1509 * and dir index entries, otherwise we would fail with
1510 * -EEXIST returned from btrfs_add_link() below.
1511 */
1512 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1513 name, namelen);
1514 if (ret > 0) {
1515 ret = btrfs_unlink_inode(trans, root,
1516 BTRFS_I(dir),
1517 BTRFS_I(inode),
1518 name, namelen);
1519 /*
1520 * If we dropped the link count to 0, bump it so
1521 * that later the iput() on the inode will not
1522 * free it. We will fixup the link count later.
1523 */
1524 if (!ret && inode->i_nlink == 0)
1525 inc_nlink(inode);
1526 }
1527 if (ret < 0)
1528 goto out;
1529
1530 /* insert our name */
1531 ret = add_link(trans, root, dir, inode, name, namelen,
1532 ref_index);
1533 if (ret)
1534 goto out;
1535
1536 btrfs_update_inode(trans, root, inode);
1537 }
1538
1539 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1540 kfree(name);
1541 name = NULL;
1542 if (log_ref_ver) {
1543 iput(dir);
1544 dir = NULL;
1545 }
1546 }
1547
1548 /*
1549 * Before we overwrite the inode reference item in the subvolume tree
1550 * with the item from the log tree, we must unlink all names from the
1551 * parent directory that are in the subvolume's tree inode reference
1552 * item, otherwise we end up with an inconsistent subvolume tree where
1553 * dir index entries exist for a name but there is no inode reference
1554 * item with the same name.
1555 */
1556 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1557 key);
1558 if (ret)
1559 goto out;
1560
1561 /* finally write the back reference in the inode */
1562 ret = overwrite_item(trans, root, path, eb, slot, key);
1563 out:
1564 btrfs_release_path(path);
1565 kfree(name);
1566 iput(dir);
1567 iput(inode);
1568 return ret;
1569 }
1570
1571 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1572 struct btrfs_root *root, u64 ino)
1573 {
1574 int ret;
1575
1576 ret = btrfs_insert_orphan_item(trans, root, ino);
1577 if (ret == -EEXIST)
1578 ret = 0;
1579
1580 return ret;
1581 }
1582
1583 static int count_inode_extrefs(struct btrfs_root *root,
1584 struct btrfs_inode *inode, struct btrfs_path *path)
1585 {
1586 int ret = 0;
1587 int name_len;
1588 unsigned int nlink = 0;
1589 u32 item_size;
1590 u32 cur_offset = 0;
1591 u64 inode_objectid = btrfs_ino(inode);
1592 u64 offset = 0;
1593 unsigned long ptr;
1594 struct btrfs_inode_extref *extref;
1595 struct extent_buffer *leaf;
1596
1597 while (1) {
1598 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1599 &extref, &offset);
1600 if (ret)
1601 break;
1602
1603 leaf = path->nodes[0];
1604 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1605 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1606 cur_offset = 0;
1607
1608 while (cur_offset < item_size) {
1609 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1610 name_len = btrfs_inode_extref_name_len(leaf, extref);
1611
1612 nlink++;
1613
1614 cur_offset += name_len + sizeof(*extref);
1615 }
1616
1617 offset++;
1618 btrfs_release_path(path);
1619 }
1620 btrfs_release_path(path);
1621
1622 if (ret < 0 && ret != -ENOENT)
1623 return ret;
1624 return nlink;
1625 }
1626
1627 static int count_inode_refs(struct btrfs_root *root,
1628 struct btrfs_inode *inode, struct btrfs_path *path)
1629 {
1630 int ret;
1631 struct btrfs_key key;
1632 unsigned int nlink = 0;
1633 unsigned long ptr;
1634 unsigned long ptr_end;
1635 int name_len;
1636 u64 ino = btrfs_ino(inode);
1637
1638 key.objectid = ino;
1639 key.type = BTRFS_INODE_REF_KEY;
1640 key.offset = (u64)-1;
1641
1642 while (1) {
1643 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1644 if (ret < 0)
1645 break;
1646 if (ret > 0) {
1647 if (path->slots[0] == 0)
1648 break;
1649 path->slots[0]--;
1650 }
1651 process_slot:
1652 btrfs_item_key_to_cpu(path->nodes[0], &key,
1653 path->slots[0]);
1654 if (key.objectid != ino ||
1655 key.type != BTRFS_INODE_REF_KEY)
1656 break;
1657 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1658 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1659 path->slots[0]);
1660 while (ptr < ptr_end) {
1661 struct btrfs_inode_ref *ref;
1662
1663 ref = (struct btrfs_inode_ref *)ptr;
1664 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1665 ref);
1666 ptr = (unsigned long)(ref + 1) + name_len;
1667 nlink++;
1668 }
1669
1670 if (key.offset == 0)
1671 break;
1672 if (path->slots[0] > 0) {
1673 path->slots[0]--;
1674 goto process_slot;
1675 }
1676 key.offset--;
1677 btrfs_release_path(path);
1678 }
1679 btrfs_release_path(path);
1680
1681 return nlink;
1682 }
1683
1684 /*
1685 * There are a few corners where the link count of the file can't
1686 * be properly maintained during replay. So, instead of adding
1687 * lots of complexity to the log code, we just scan the backrefs
1688 * for any file that has been through replay.
1689 *
1690 * The scan will update the link count on the inode to reflect the
1691 * number of back refs found. If it goes down to zero, the iput
1692 * will free the inode.
1693 */
1694 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1695 struct btrfs_root *root,
1696 struct inode *inode)
1697 {
1698 struct btrfs_path *path;
1699 int ret;
1700 u64 nlink = 0;
1701 u64 ino = btrfs_ino(BTRFS_I(inode));
1702
1703 path = btrfs_alloc_path();
1704 if (!path)
1705 return -ENOMEM;
1706
1707 ret = count_inode_refs(root, BTRFS_I(inode), path);
1708 if (ret < 0)
1709 goto out;
1710
1711 nlink = ret;
1712
1713 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1714 if (ret < 0)
1715 goto out;
1716
1717 nlink += ret;
1718
1719 ret = 0;
1720
1721 if (nlink != inode->i_nlink) {
1722 set_nlink(inode, nlink);
1723 btrfs_update_inode(trans, root, inode);
1724 }
1725 BTRFS_I(inode)->index_cnt = (u64)-1;
1726
1727 if (inode->i_nlink == 0) {
1728 if (S_ISDIR(inode->i_mode)) {
1729 ret = replay_dir_deletes(trans, root, NULL, path,
1730 ino, 1);
1731 if (ret)
1732 goto out;
1733 }
1734 ret = insert_orphan_item(trans, root, ino);
1735 }
1736
1737 out:
1738 btrfs_free_path(path);
1739 return ret;
1740 }
1741
1742 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1743 struct btrfs_root *root,
1744 struct btrfs_path *path)
1745 {
1746 int ret;
1747 struct btrfs_key key;
1748 struct inode *inode;
1749
1750 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1751 key.type = BTRFS_ORPHAN_ITEM_KEY;
1752 key.offset = (u64)-1;
1753 while (1) {
1754 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1755 if (ret < 0)
1756 break;
1757
1758 if (ret == 1) {
1759 if (path->slots[0] == 0)
1760 break;
1761 path->slots[0]--;
1762 }
1763
1764 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1765 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1766 key.type != BTRFS_ORPHAN_ITEM_KEY)
1767 break;
1768
1769 ret = btrfs_del_item(trans, root, path);
1770 if (ret)
1771 goto out;
1772
1773 btrfs_release_path(path);
1774 inode = read_one_inode(root, key.offset);
1775 if (!inode)
1776 return -EIO;
1777
1778 ret = fixup_inode_link_count(trans, root, inode);
1779 iput(inode);
1780 if (ret)
1781 goto out;
1782
1783 /*
1784 * fixup on a directory may create new entries,
1785 * make sure we always look for the highset possible
1786 * offset
1787 */
1788 key.offset = (u64)-1;
1789 }
1790 ret = 0;
1791 out:
1792 btrfs_release_path(path);
1793 return ret;
1794 }
1795
1796
1797 /*
1798 * record a given inode in the fixup dir so we can check its link
1799 * count when replay is done. The link count is incremented here
1800 * so the inode won't go away until we check it
1801 */
1802 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1803 struct btrfs_root *root,
1804 struct btrfs_path *path,
1805 u64 objectid)
1806 {
1807 struct btrfs_key key;
1808 int ret = 0;
1809 struct inode *inode;
1810
1811 inode = read_one_inode(root, objectid);
1812 if (!inode)
1813 return -EIO;
1814
1815 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1816 key.type = BTRFS_ORPHAN_ITEM_KEY;
1817 key.offset = objectid;
1818
1819 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1820
1821 btrfs_release_path(path);
1822 if (ret == 0) {
1823 if (!inode->i_nlink)
1824 set_nlink(inode, 1);
1825 else
1826 inc_nlink(inode);
1827 ret = btrfs_update_inode(trans, root, inode);
1828 } else if (ret == -EEXIST) {
1829 ret = 0;
1830 } else {
1831 BUG(); /* Logic Error */
1832 }
1833 iput(inode);
1834
1835 return ret;
1836 }
1837
1838 /*
1839 * when replaying the log for a directory, we only insert names
1840 * for inodes that actually exist. This means an fsync on a directory
1841 * does not implicitly fsync all the new files in it
1842 */
1843 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1844 struct btrfs_root *root,
1845 u64 dirid, u64 index,
1846 char *name, int name_len,
1847 struct btrfs_key *location)
1848 {
1849 struct inode *inode;
1850 struct inode *dir;
1851 int ret;
1852
1853 inode = read_one_inode(root, location->objectid);
1854 if (!inode)
1855 return -ENOENT;
1856
1857 dir = read_one_inode(root, dirid);
1858 if (!dir) {
1859 iput(inode);
1860 return -EIO;
1861 }
1862
1863 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1864 name_len, 1, index);
1865
1866 /* FIXME, put inode into FIXUP list */
1867
1868 iput(inode);
1869 iput(dir);
1870 return ret;
1871 }
1872
1873 /*
1874 * Return true if an inode reference exists in the log for the given name,
1875 * inode and parent inode.
1876 */
1877 static bool name_in_log_ref(struct btrfs_root *log_root,
1878 const char *name, const int name_len,
1879 const u64 dirid, const u64 ino)
1880 {
1881 struct btrfs_key search_key;
1882
1883 search_key.objectid = ino;
1884 search_key.type = BTRFS_INODE_REF_KEY;
1885 search_key.offset = dirid;
1886 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1887 return true;
1888
1889 search_key.type = BTRFS_INODE_EXTREF_KEY;
1890 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1891 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1892 return true;
1893
1894 return false;
1895 }
1896
1897 /*
1898 * take a single entry in a log directory item and replay it into
1899 * the subvolume.
1900 *
1901 * if a conflicting item exists in the subdirectory already,
1902 * the inode it points to is unlinked and put into the link count
1903 * fix up tree.
1904 *
1905 * If a name from the log points to a file or directory that does
1906 * not exist in the FS, it is skipped. fsyncs on directories
1907 * do not force down inodes inside that directory, just changes to the
1908 * names or unlinks in a directory.
1909 *
1910 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1911 * non-existing inode) and 1 if the name was replayed.
1912 */
1913 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1914 struct btrfs_root *root,
1915 struct btrfs_path *path,
1916 struct extent_buffer *eb,
1917 struct btrfs_dir_item *di,
1918 struct btrfs_key *key)
1919 {
1920 char *name;
1921 int name_len;
1922 struct btrfs_dir_item *dst_di;
1923 struct btrfs_key found_key;
1924 struct btrfs_key log_key;
1925 struct inode *dir;
1926 u8 log_type;
1927 int exists;
1928 int ret = 0;
1929 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1930 bool name_added = false;
1931
1932 dir = read_one_inode(root, key->objectid);
1933 if (!dir)
1934 return -EIO;
1935
1936 name_len = btrfs_dir_name_len(eb, di);
1937 name = kmalloc(name_len, GFP_NOFS);
1938 if (!name) {
1939 ret = -ENOMEM;
1940 goto out;
1941 }
1942
1943 log_type = btrfs_dir_type(eb, di);
1944 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1945 name_len);
1946
1947 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1948 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1949 if (exists == 0)
1950 exists = 1;
1951 else
1952 exists = 0;
1953 btrfs_release_path(path);
1954
1955 if (key->type == BTRFS_DIR_ITEM_KEY) {
1956 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1957 name, name_len, 1);
1958 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1959 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1960 key->objectid,
1961 key->offset, name,
1962 name_len, 1);
1963 } else {
1964 /* Corruption */
1965 ret = -EINVAL;
1966 goto out;
1967 }
1968 if (IS_ERR_OR_NULL(dst_di)) {
1969 /* we need a sequence number to insert, so we only
1970 * do inserts for the BTRFS_DIR_INDEX_KEY types
1971 */
1972 if (key->type != BTRFS_DIR_INDEX_KEY)
1973 goto out;
1974 goto insert;
1975 }
1976
1977 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1978 /* the existing item matches the logged item */
1979 if (found_key.objectid == log_key.objectid &&
1980 found_key.type == log_key.type &&
1981 found_key.offset == log_key.offset &&
1982 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1983 update_size = false;
1984 goto out;
1985 }
1986
1987 /*
1988 * don't drop the conflicting directory entry if the inode
1989 * for the new entry doesn't exist
1990 */
1991 if (!exists)
1992 goto out;
1993
1994 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
1995 if (ret)
1996 goto out;
1997
1998 if (key->type == BTRFS_DIR_INDEX_KEY)
1999 goto insert;
2000 out:
2001 btrfs_release_path(path);
2002 if (!ret && update_size) {
2003 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2004 ret = btrfs_update_inode(trans, root, dir);
2005 }
2006 kfree(name);
2007 iput(dir);
2008 if (!ret && name_added)
2009 ret = 1;
2010 return ret;
2011
2012 insert:
2013 if (name_in_log_ref(root->log_root, name, name_len,
2014 key->objectid, log_key.objectid)) {
2015 /* The dentry will be added later. */
2016 ret = 0;
2017 update_size = false;
2018 goto out;
2019 }
2020 btrfs_release_path(path);
2021 ret = insert_one_name(trans, root, key->objectid, key->offset,
2022 name, name_len, &log_key);
2023 if (ret && ret != -ENOENT && ret != -EEXIST)
2024 goto out;
2025 if (!ret)
2026 name_added = true;
2027 update_size = false;
2028 ret = 0;
2029 goto out;
2030 }
2031
2032 /*
2033 * find all the names in a directory item and reconcile them into
2034 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2035 * one name in a directory item, but the same code gets used for
2036 * both directory index types
2037 */
2038 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2039 struct btrfs_root *root,
2040 struct btrfs_path *path,
2041 struct extent_buffer *eb, int slot,
2042 struct btrfs_key *key)
2043 {
2044 int ret = 0;
2045 u32 item_size = btrfs_item_size_nr(eb, slot);
2046 struct btrfs_dir_item *di;
2047 int name_len;
2048 unsigned long ptr;
2049 unsigned long ptr_end;
2050 struct btrfs_path *fixup_path = NULL;
2051
2052 ptr = btrfs_item_ptr_offset(eb, slot);
2053 ptr_end = ptr + item_size;
2054 while (ptr < ptr_end) {
2055 di = (struct btrfs_dir_item *)ptr;
2056 name_len = btrfs_dir_name_len(eb, di);
2057 ret = replay_one_name(trans, root, path, eb, di, key);
2058 if (ret < 0)
2059 break;
2060 ptr = (unsigned long)(di + 1);
2061 ptr += name_len;
2062
2063 /*
2064 * If this entry refers to a non-directory (directories can not
2065 * have a link count > 1) and it was added in the transaction
2066 * that was not committed, make sure we fixup the link count of
2067 * the inode it the entry points to. Otherwise something like
2068 * the following would result in a directory pointing to an
2069 * inode with a wrong link that does not account for this dir
2070 * entry:
2071 *
2072 * mkdir testdir
2073 * touch testdir/foo
2074 * touch testdir/bar
2075 * sync
2076 *
2077 * ln testdir/bar testdir/bar_link
2078 * ln testdir/foo testdir/foo_link
2079 * xfs_io -c "fsync" testdir/bar
2080 *
2081 * <power failure>
2082 *
2083 * mount fs, log replay happens
2084 *
2085 * File foo would remain with a link count of 1 when it has two
2086 * entries pointing to it in the directory testdir. This would
2087 * make it impossible to ever delete the parent directory has
2088 * it would result in stale dentries that can never be deleted.
2089 */
2090 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2091 struct btrfs_key di_key;
2092
2093 if (!fixup_path) {
2094 fixup_path = btrfs_alloc_path();
2095 if (!fixup_path) {
2096 ret = -ENOMEM;
2097 break;
2098 }
2099 }
2100
2101 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2102 ret = link_to_fixup_dir(trans, root, fixup_path,
2103 di_key.objectid);
2104 if (ret)
2105 break;
2106 }
2107 ret = 0;
2108 }
2109 btrfs_free_path(fixup_path);
2110 return ret;
2111 }
2112
2113 /*
2114 * directory replay has two parts. There are the standard directory
2115 * items in the log copied from the subvolume, and range items
2116 * created in the log while the subvolume was logged.
2117 *
2118 * The range items tell us which parts of the key space the log
2119 * is authoritative for. During replay, if a key in the subvolume
2120 * directory is in a logged range item, but not actually in the log
2121 * that means it was deleted from the directory before the fsync
2122 * and should be removed.
2123 */
2124 static noinline int find_dir_range(struct btrfs_root *root,
2125 struct btrfs_path *path,
2126 u64 dirid, int key_type,
2127 u64 *start_ret, u64 *end_ret)
2128 {
2129 struct btrfs_key key;
2130 u64 found_end;
2131 struct btrfs_dir_log_item *item;
2132 int ret;
2133 int nritems;
2134
2135 if (*start_ret == (u64)-1)
2136 return 1;
2137
2138 key.objectid = dirid;
2139 key.type = key_type;
2140 key.offset = *start_ret;
2141
2142 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2143 if (ret < 0)
2144 goto out;
2145 if (ret > 0) {
2146 if (path->slots[0] == 0)
2147 goto out;
2148 path->slots[0]--;
2149 }
2150 if (ret != 0)
2151 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2152
2153 if (key.type != key_type || key.objectid != dirid) {
2154 ret = 1;
2155 goto next;
2156 }
2157 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2158 struct btrfs_dir_log_item);
2159 found_end = btrfs_dir_log_end(path->nodes[0], item);
2160
2161 if (*start_ret >= key.offset && *start_ret <= found_end) {
2162 ret = 0;
2163 *start_ret = key.offset;
2164 *end_ret = found_end;
2165 goto out;
2166 }
2167 ret = 1;
2168 next:
2169 /* check the next slot in the tree to see if it is a valid item */
2170 nritems = btrfs_header_nritems(path->nodes[0]);
2171 path->slots[0]++;
2172 if (path->slots[0] >= nritems) {
2173 ret = btrfs_next_leaf(root, path);
2174 if (ret)
2175 goto out;
2176 }
2177
2178 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2179
2180 if (key.type != key_type || key.objectid != dirid) {
2181 ret = 1;
2182 goto out;
2183 }
2184 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2185 struct btrfs_dir_log_item);
2186 found_end = btrfs_dir_log_end(path->nodes[0], item);
2187 *start_ret = key.offset;
2188 *end_ret = found_end;
2189 ret = 0;
2190 out:
2191 btrfs_release_path(path);
2192 return ret;
2193 }
2194
2195 /*
2196 * this looks for a given directory item in the log. If the directory
2197 * item is not in the log, the item is removed and the inode it points
2198 * to is unlinked
2199 */
2200 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2201 struct btrfs_root *root,
2202 struct btrfs_root *log,
2203 struct btrfs_path *path,
2204 struct btrfs_path *log_path,
2205 struct inode *dir,
2206 struct btrfs_key *dir_key)
2207 {
2208 int ret;
2209 struct extent_buffer *eb;
2210 int slot;
2211 u32 item_size;
2212 struct btrfs_dir_item *di;
2213 struct btrfs_dir_item *log_di;
2214 int name_len;
2215 unsigned long ptr;
2216 unsigned long ptr_end;
2217 char *name;
2218 struct inode *inode;
2219 struct btrfs_key location;
2220
2221 again:
2222 eb = path->nodes[0];
2223 slot = path->slots[0];
2224 item_size = btrfs_item_size_nr(eb, slot);
2225 ptr = btrfs_item_ptr_offset(eb, slot);
2226 ptr_end = ptr + item_size;
2227 while (ptr < ptr_end) {
2228 di = (struct btrfs_dir_item *)ptr;
2229 name_len = btrfs_dir_name_len(eb, di);
2230 name = kmalloc(name_len, GFP_NOFS);
2231 if (!name) {
2232 ret = -ENOMEM;
2233 goto out;
2234 }
2235 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2236 name_len);
2237 log_di = NULL;
2238 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2239 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2240 dir_key->objectid,
2241 name, name_len, 0);
2242 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2243 log_di = btrfs_lookup_dir_index_item(trans, log,
2244 log_path,
2245 dir_key->objectid,
2246 dir_key->offset,
2247 name, name_len, 0);
2248 }
2249 if (!log_di || log_di == ERR_PTR(-ENOENT)) {
2250 btrfs_dir_item_key_to_cpu(eb, di, &location);
2251 btrfs_release_path(path);
2252 btrfs_release_path(log_path);
2253 inode = read_one_inode(root, location.objectid);
2254 if (!inode) {
2255 kfree(name);
2256 return -EIO;
2257 }
2258
2259 ret = link_to_fixup_dir(trans, root,
2260 path, location.objectid);
2261 if (ret) {
2262 kfree(name);
2263 iput(inode);
2264 goto out;
2265 }
2266
2267 inc_nlink(inode);
2268 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2269 BTRFS_I(inode), name, name_len);
2270 if (!ret)
2271 ret = btrfs_run_delayed_items(trans);
2272 kfree(name);
2273 iput(inode);
2274 if (ret)
2275 goto out;
2276
2277 /* there might still be more names under this key
2278 * check and repeat if required
2279 */
2280 ret = btrfs_search_slot(NULL, root, dir_key, path,
2281 0, 0);
2282 if (ret == 0)
2283 goto again;
2284 ret = 0;
2285 goto out;
2286 } else if (IS_ERR(log_di)) {
2287 kfree(name);
2288 return PTR_ERR(log_di);
2289 }
2290 btrfs_release_path(log_path);
2291 kfree(name);
2292
2293 ptr = (unsigned long)(di + 1);
2294 ptr += name_len;
2295 }
2296 ret = 0;
2297 out:
2298 btrfs_release_path(path);
2299 btrfs_release_path(log_path);
2300 return ret;
2301 }
2302
2303 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2304 struct btrfs_root *root,
2305 struct btrfs_root *log,
2306 struct btrfs_path *path,
2307 const u64 ino)
2308 {
2309 struct btrfs_key search_key;
2310 struct btrfs_path *log_path;
2311 int i;
2312 int nritems;
2313 int ret;
2314
2315 log_path = btrfs_alloc_path();
2316 if (!log_path)
2317 return -ENOMEM;
2318
2319 search_key.objectid = ino;
2320 search_key.type = BTRFS_XATTR_ITEM_KEY;
2321 search_key.offset = 0;
2322 again:
2323 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2324 if (ret < 0)
2325 goto out;
2326 process_leaf:
2327 nritems = btrfs_header_nritems(path->nodes[0]);
2328 for (i = path->slots[0]; i < nritems; i++) {
2329 struct btrfs_key key;
2330 struct btrfs_dir_item *di;
2331 struct btrfs_dir_item *log_di;
2332 u32 total_size;
2333 u32 cur;
2334
2335 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2336 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2337 ret = 0;
2338 goto out;
2339 }
2340
2341 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2342 total_size = btrfs_item_size_nr(path->nodes[0], i);
2343 cur = 0;
2344 while (cur < total_size) {
2345 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2346 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2347 u32 this_len = sizeof(*di) + name_len + data_len;
2348 char *name;
2349
2350 name = kmalloc(name_len, GFP_NOFS);
2351 if (!name) {
2352 ret = -ENOMEM;
2353 goto out;
2354 }
2355 read_extent_buffer(path->nodes[0], name,
2356 (unsigned long)(di + 1), name_len);
2357
2358 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2359 name, name_len, 0);
2360 btrfs_release_path(log_path);
2361 if (!log_di) {
2362 /* Doesn't exist in log tree, so delete it. */
2363 btrfs_release_path(path);
2364 di = btrfs_lookup_xattr(trans, root, path, ino,
2365 name, name_len, -1);
2366 kfree(name);
2367 if (IS_ERR(di)) {
2368 ret = PTR_ERR(di);
2369 goto out;
2370 }
2371 ASSERT(di);
2372 ret = btrfs_delete_one_dir_name(trans, root,
2373 path, di);
2374 if (ret)
2375 goto out;
2376 btrfs_release_path(path);
2377 search_key = key;
2378 goto again;
2379 }
2380 kfree(name);
2381 if (IS_ERR(log_di)) {
2382 ret = PTR_ERR(log_di);
2383 goto out;
2384 }
2385 cur += this_len;
2386 di = (struct btrfs_dir_item *)((char *)di + this_len);
2387 }
2388 }
2389 ret = btrfs_next_leaf(root, path);
2390 if (ret > 0)
2391 ret = 0;
2392 else if (ret == 0)
2393 goto process_leaf;
2394 out:
2395 btrfs_free_path(log_path);
2396 btrfs_release_path(path);
2397 return ret;
2398 }
2399
2400
2401 /*
2402 * deletion replay happens before we copy any new directory items
2403 * out of the log or out of backreferences from inodes. It
2404 * scans the log to find ranges of keys that log is authoritative for,
2405 * and then scans the directory to find items in those ranges that are
2406 * not present in the log.
2407 *
2408 * Anything we don't find in the log is unlinked and removed from the
2409 * directory.
2410 */
2411 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2412 struct btrfs_root *root,
2413 struct btrfs_root *log,
2414 struct btrfs_path *path,
2415 u64 dirid, int del_all)
2416 {
2417 u64 range_start;
2418 u64 range_end;
2419 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2420 int ret = 0;
2421 struct btrfs_key dir_key;
2422 struct btrfs_key found_key;
2423 struct btrfs_path *log_path;
2424 struct inode *dir;
2425
2426 dir_key.objectid = dirid;
2427 dir_key.type = BTRFS_DIR_ITEM_KEY;
2428 log_path = btrfs_alloc_path();
2429 if (!log_path)
2430 return -ENOMEM;
2431
2432 dir = read_one_inode(root, dirid);
2433 /* it isn't an error if the inode isn't there, that can happen
2434 * because we replay the deletes before we copy in the inode item
2435 * from the log
2436 */
2437 if (!dir) {
2438 btrfs_free_path(log_path);
2439 return 0;
2440 }
2441 again:
2442 range_start = 0;
2443 range_end = 0;
2444 while (1) {
2445 if (del_all)
2446 range_end = (u64)-1;
2447 else {
2448 ret = find_dir_range(log, path, dirid, key_type,
2449 &range_start, &range_end);
2450 if (ret != 0)
2451 break;
2452 }
2453
2454 dir_key.offset = range_start;
2455 while (1) {
2456 int nritems;
2457 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2458 0, 0);
2459 if (ret < 0)
2460 goto out;
2461
2462 nritems = btrfs_header_nritems(path->nodes[0]);
2463 if (path->slots[0] >= nritems) {
2464 ret = btrfs_next_leaf(root, path);
2465 if (ret == 1)
2466 break;
2467 else if (ret < 0)
2468 goto out;
2469 }
2470 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2471 path->slots[0]);
2472 if (found_key.objectid != dirid ||
2473 found_key.type != dir_key.type)
2474 goto next_type;
2475
2476 if (found_key.offset > range_end)
2477 break;
2478
2479 ret = check_item_in_log(trans, root, log, path,
2480 log_path, dir,
2481 &found_key);
2482 if (ret)
2483 goto out;
2484 if (found_key.offset == (u64)-1)
2485 break;
2486 dir_key.offset = found_key.offset + 1;
2487 }
2488 btrfs_release_path(path);
2489 if (range_end == (u64)-1)
2490 break;
2491 range_start = range_end + 1;
2492 }
2493
2494 next_type:
2495 ret = 0;
2496 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2497 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2498 dir_key.type = BTRFS_DIR_INDEX_KEY;
2499 btrfs_release_path(path);
2500 goto again;
2501 }
2502 out:
2503 btrfs_release_path(path);
2504 btrfs_free_path(log_path);
2505 iput(dir);
2506 return ret;
2507 }
2508
2509 /*
2510 * the process_func used to replay items from the log tree. This
2511 * gets called in two different stages. The first stage just looks
2512 * for inodes and makes sure they are all copied into the subvolume.
2513 *
2514 * The second stage copies all the other item types from the log into
2515 * the subvolume. The two stage approach is slower, but gets rid of
2516 * lots of complexity around inodes referencing other inodes that exist
2517 * only in the log (references come from either directory items or inode
2518 * back refs).
2519 */
2520 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2521 struct walk_control *wc, u64 gen, int level)
2522 {
2523 int nritems;
2524 struct btrfs_path *path;
2525 struct btrfs_root *root = wc->replay_dest;
2526 struct btrfs_key key;
2527 int i;
2528 int ret;
2529
2530 ret = btrfs_read_buffer(eb, gen, level, NULL);
2531 if (ret)
2532 return ret;
2533
2534 level = btrfs_header_level(eb);
2535
2536 if (level != 0)
2537 return 0;
2538
2539 path = btrfs_alloc_path();
2540 if (!path)
2541 return -ENOMEM;
2542
2543 nritems = btrfs_header_nritems(eb);
2544 for (i = 0; i < nritems; i++) {
2545 btrfs_item_key_to_cpu(eb, &key, i);
2546
2547 /* inode keys are done during the first stage */
2548 if (key.type == BTRFS_INODE_ITEM_KEY &&
2549 wc->stage == LOG_WALK_REPLAY_INODES) {
2550 struct btrfs_inode_item *inode_item;
2551 u32 mode;
2552
2553 inode_item = btrfs_item_ptr(eb, i,
2554 struct btrfs_inode_item);
2555 /*
2556 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2557 * and never got linked before the fsync, skip it, as
2558 * replaying it is pointless since it would be deleted
2559 * later. We skip logging tmpfiles, but it's always
2560 * possible we are replaying a log created with a kernel
2561 * that used to log tmpfiles.
2562 */
2563 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2564 wc->ignore_cur_inode = true;
2565 continue;
2566 } else {
2567 wc->ignore_cur_inode = false;
2568 }
2569 ret = replay_xattr_deletes(wc->trans, root, log,
2570 path, key.objectid);
2571 if (ret)
2572 break;
2573 mode = btrfs_inode_mode(eb, inode_item);
2574 if (S_ISDIR(mode)) {
2575 ret = replay_dir_deletes(wc->trans,
2576 root, log, path, key.objectid, 0);
2577 if (ret)
2578 break;
2579 }
2580 ret = overwrite_item(wc->trans, root, path,
2581 eb, i, &key);
2582 if (ret)
2583 break;
2584
2585 /*
2586 * Before replaying extents, truncate the inode to its
2587 * size. We need to do it now and not after log replay
2588 * because before an fsync we can have prealloc extents
2589 * added beyond the inode's i_size. If we did it after,
2590 * through orphan cleanup for example, we would drop
2591 * those prealloc extents just after replaying them.
2592 */
2593 if (S_ISREG(mode)) {
2594 struct inode *inode;
2595 u64 from;
2596
2597 inode = read_one_inode(root, key.objectid);
2598 if (!inode) {
2599 ret = -EIO;
2600 break;
2601 }
2602 from = ALIGN(i_size_read(inode),
2603 root->fs_info->sectorsize);
2604 ret = btrfs_drop_extents(wc->trans, root, inode,
2605 from, (u64)-1, 1);
2606 if (!ret) {
2607 /* Update the inode's nbytes. */
2608 ret = btrfs_update_inode(wc->trans,
2609 root, inode);
2610 }
2611 iput(inode);
2612 if (ret)
2613 break;
2614 }
2615
2616 ret = link_to_fixup_dir(wc->trans, root,
2617 path, key.objectid);
2618 if (ret)
2619 break;
2620 }
2621
2622 if (wc->ignore_cur_inode)
2623 continue;
2624
2625 if (key.type == BTRFS_DIR_INDEX_KEY &&
2626 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2627 ret = replay_one_dir_item(wc->trans, root, path,
2628 eb, i, &key);
2629 if (ret)
2630 break;
2631 }
2632
2633 if (wc->stage < LOG_WALK_REPLAY_ALL)
2634 continue;
2635
2636 /* these keys are simply copied */
2637 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2638 ret = overwrite_item(wc->trans, root, path,
2639 eb, i, &key);
2640 if (ret)
2641 break;
2642 } else if (key.type == BTRFS_INODE_REF_KEY ||
2643 key.type == BTRFS_INODE_EXTREF_KEY) {
2644 ret = add_inode_ref(wc->trans, root, log, path,
2645 eb, i, &key);
2646 if (ret && ret != -ENOENT)
2647 break;
2648 ret = 0;
2649 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2650 ret = replay_one_extent(wc->trans, root, path,
2651 eb, i, &key);
2652 if (ret)
2653 break;
2654 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2655 ret = replay_one_dir_item(wc->trans, root, path,
2656 eb, i, &key);
2657 if (ret)
2658 break;
2659 }
2660 }
2661 btrfs_free_path(path);
2662 return ret;
2663 }
2664
2665 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2666 struct btrfs_root *root,
2667 struct btrfs_path *path, int *level,
2668 struct walk_control *wc)
2669 {
2670 struct btrfs_fs_info *fs_info = root->fs_info;
2671 u64 root_owner;
2672 u64 bytenr;
2673 u64 ptr_gen;
2674 struct extent_buffer *next;
2675 struct extent_buffer *cur;
2676 struct extent_buffer *parent;
2677 u32 blocksize;
2678 int ret = 0;
2679
2680 WARN_ON(*level < 0);
2681 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2682
2683 while (*level > 0) {
2684 struct btrfs_key first_key;
2685
2686 WARN_ON(*level < 0);
2687 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2688 cur = path->nodes[*level];
2689
2690 WARN_ON(btrfs_header_level(cur) != *level);
2691
2692 if (path->slots[*level] >=
2693 btrfs_header_nritems(cur))
2694 break;
2695
2696 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2697 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2698 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2699 blocksize = fs_info->nodesize;
2700
2701 parent = path->nodes[*level];
2702 root_owner = btrfs_header_owner(parent);
2703
2704 next = btrfs_find_create_tree_block(fs_info, bytenr);
2705 if (IS_ERR(next))
2706 return PTR_ERR(next);
2707
2708 if (*level == 1) {
2709 ret = wc->process_func(root, next, wc, ptr_gen,
2710 *level - 1);
2711 if (ret) {
2712 free_extent_buffer(next);
2713 return ret;
2714 }
2715
2716 path->slots[*level]++;
2717 if (wc->free) {
2718 ret = btrfs_read_buffer(next, ptr_gen,
2719 *level - 1, &first_key);
2720 if (ret) {
2721 free_extent_buffer(next);
2722 return ret;
2723 }
2724
2725 if (trans) {
2726 btrfs_tree_lock(next);
2727 btrfs_set_lock_blocking_write(next);
2728 clean_tree_block(fs_info, next);
2729 btrfs_wait_tree_block_writeback(next);
2730 btrfs_tree_unlock(next);
2731 } else {
2732 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2733 clear_extent_buffer_dirty(next);
2734 }
2735
2736 WARN_ON(root_owner !=
2737 BTRFS_TREE_LOG_OBJECTID);
2738 ret = btrfs_free_and_pin_reserved_extent(
2739 fs_info, bytenr,
2740 blocksize);
2741 if (ret) {
2742 free_extent_buffer(next);
2743 return ret;
2744 }
2745 }
2746 free_extent_buffer(next);
2747 continue;
2748 }
2749 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2750 if (ret) {
2751 free_extent_buffer(next);
2752 return ret;
2753 }
2754
2755 WARN_ON(*level <= 0);
2756 if (path->nodes[*level-1])
2757 free_extent_buffer(path->nodes[*level-1]);
2758 path->nodes[*level-1] = next;
2759 *level = btrfs_header_level(next);
2760 path->slots[*level] = 0;
2761 cond_resched();
2762 }
2763 WARN_ON(*level < 0);
2764 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2765
2766 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2767
2768 cond_resched();
2769 return 0;
2770 }
2771
2772 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2773 struct btrfs_root *root,
2774 struct btrfs_path *path, int *level,
2775 struct walk_control *wc)
2776 {
2777 struct btrfs_fs_info *fs_info = root->fs_info;
2778 u64 root_owner;
2779 int i;
2780 int slot;
2781 int ret;
2782
2783 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2784 slot = path->slots[i];
2785 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2786 path->slots[i]++;
2787 *level = i;
2788 WARN_ON(*level == 0);
2789 return 0;
2790 } else {
2791 struct extent_buffer *parent;
2792 if (path->nodes[*level] == root->node)
2793 parent = path->nodes[*level];
2794 else
2795 parent = path->nodes[*level + 1];
2796
2797 root_owner = btrfs_header_owner(parent);
2798 ret = wc->process_func(root, path->nodes[*level], wc,
2799 btrfs_header_generation(path->nodes[*level]),
2800 *level);
2801 if (ret)
2802 return ret;
2803
2804 if (wc->free) {
2805 struct extent_buffer *next;
2806
2807 next = path->nodes[*level];
2808
2809 if (trans) {
2810 btrfs_tree_lock(next);
2811 btrfs_set_lock_blocking_write(next);
2812 clean_tree_block(fs_info, next);
2813 btrfs_wait_tree_block_writeback(next);
2814 btrfs_tree_unlock(next);
2815 } else {
2816 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2817 clear_extent_buffer_dirty(next);
2818 }
2819
2820 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2821 ret = btrfs_free_and_pin_reserved_extent(
2822 fs_info,
2823 path->nodes[*level]->start,
2824 path->nodes[*level]->len);
2825 if (ret)
2826 return ret;
2827 }
2828 free_extent_buffer(path->nodes[*level]);
2829 path->nodes[*level] = NULL;
2830 *level = i + 1;
2831 }
2832 }
2833 return 1;
2834 }
2835
2836 /*
2837 * drop the reference count on the tree rooted at 'snap'. This traverses
2838 * the tree freeing any blocks that have a ref count of zero after being
2839 * decremented.
2840 */
2841 static int walk_log_tree(struct btrfs_trans_handle *trans,
2842 struct btrfs_root *log, struct walk_control *wc)
2843 {
2844 struct btrfs_fs_info *fs_info = log->fs_info;
2845 int ret = 0;
2846 int wret;
2847 int level;
2848 struct btrfs_path *path;
2849 int orig_level;
2850
2851 path = btrfs_alloc_path();
2852 if (!path)
2853 return -ENOMEM;
2854
2855 level = btrfs_header_level(log->node);
2856 orig_level = level;
2857 path->nodes[level] = log->node;
2858 extent_buffer_get(log->node);
2859 path->slots[level] = 0;
2860
2861 while (1) {
2862 wret = walk_down_log_tree(trans, log, path, &level, wc);
2863 if (wret > 0)
2864 break;
2865 if (wret < 0) {
2866 ret = wret;
2867 goto out;
2868 }
2869
2870 wret = walk_up_log_tree(trans, log, path, &level, wc);
2871 if (wret > 0)
2872 break;
2873 if (wret < 0) {
2874 ret = wret;
2875 goto out;
2876 }
2877 }
2878
2879 /* was the root node processed? if not, catch it here */
2880 if (path->nodes[orig_level]) {
2881 ret = wc->process_func(log, path->nodes[orig_level], wc,
2882 btrfs_header_generation(path->nodes[orig_level]),
2883 orig_level);
2884 if (ret)
2885 goto out;
2886 if (wc->free) {
2887 struct extent_buffer *next;
2888
2889 next = path->nodes[orig_level];
2890
2891 if (trans) {
2892 btrfs_tree_lock(next);
2893 btrfs_set_lock_blocking_write(next);
2894 clean_tree_block(fs_info, next);
2895 btrfs_wait_tree_block_writeback(next);
2896 btrfs_tree_unlock(next);
2897 } else {
2898 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2899 clear_extent_buffer_dirty(next);
2900 }
2901
2902 WARN_ON(log->root_key.objectid !=
2903 BTRFS_TREE_LOG_OBJECTID);
2904 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2905 next->start, next->len);
2906 if (ret)
2907 goto out;
2908 }
2909 }
2910
2911 out:
2912 btrfs_free_path(path);
2913 return ret;
2914 }
2915
2916 /*
2917 * helper function to update the item for a given subvolumes log root
2918 * in the tree of log roots
2919 */
2920 static int update_log_root(struct btrfs_trans_handle *trans,
2921 struct btrfs_root *log)
2922 {
2923 struct btrfs_fs_info *fs_info = log->fs_info;
2924 int ret;
2925
2926 if (log->log_transid == 1) {
2927 /* insert root item on the first sync */
2928 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2929 &log->root_key, &log->root_item);
2930 } else {
2931 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2932 &log->root_key, &log->root_item);
2933 }
2934 return ret;
2935 }
2936
2937 static void wait_log_commit(struct btrfs_root *root, int transid)
2938 {
2939 DEFINE_WAIT(wait);
2940 int index = transid % 2;
2941
2942 /*
2943 * we only allow two pending log transactions at a time,
2944 * so we know that if ours is more than 2 older than the
2945 * current transaction, we're done
2946 */
2947 for (;;) {
2948 prepare_to_wait(&root->log_commit_wait[index],
2949 &wait, TASK_UNINTERRUPTIBLE);
2950
2951 if (!(root->log_transid_committed < transid &&
2952 atomic_read(&root->log_commit[index])))
2953 break;
2954
2955 mutex_unlock(&root->log_mutex);
2956 schedule();
2957 mutex_lock(&root->log_mutex);
2958 }
2959 finish_wait(&root->log_commit_wait[index], &wait);
2960 }
2961
2962 static void wait_for_writer(struct btrfs_root *root)
2963 {
2964 DEFINE_WAIT(wait);
2965
2966 for (;;) {
2967 prepare_to_wait(&root->log_writer_wait, &wait,
2968 TASK_UNINTERRUPTIBLE);
2969 if (!atomic_read(&root->log_writers))
2970 break;
2971
2972 mutex_unlock(&root->log_mutex);
2973 schedule();
2974 mutex_lock(&root->log_mutex);
2975 }
2976 finish_wait(&root->log_writer_wait, &wait);
2977 }
2978
2979 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2980 struct btrfs_log_ctx *ctx)
2981 {
2982 if (!ctx)
2983 return;
2984
2985 mutex_lock(&root->log_mutex);
2986 list_del_init(&ctx->list);
2987 mutex_unlock(&root->log_mutex);
2988 }
2989
2990 /*
2991 * Invoked in log mutex context, or be sure there is no other task which
2992 * can access the list.
2993 */
2994 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2995 int index, int error)
2996 {
2997 struct btrfs_log_ctx *ctx;
2998 struct btrfs_log_ctx *safe;
2999
3000 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3001 list_del_init(&ctx->list);
3002 ctx->log_ret = error;
3003 }
3004
3005 INIT_LIST_HEAD(&root->log_ctxs[index]);
3006 }
3007
3008 /*
3009 * btrfs_sync_log does sends a given tree log down to the disk and
3010 * updates the super blocks to record it. When this call is done,
3011 * you know that any inodes previously logged are safely on disk only
3012 * if it returns 0.
3013 *
3014 * Any other return value means you need to call btrfs_commit_transaction.
3015 * Some of the edge cases for fsyncing directories that have had unlinks
3016 * or renames done in the past mean that sometimes the only safe
3017 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3018 * that has happened.
3019 */
3020 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3021 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3022 {
3023 int index1;
3024 int index2;
3025 int mark;
3026 int ret;
3027 struct btrfs_fs_info *fs_info = root->fs_info;
3028 struct btrfs_root *log = root->log_root;
3029 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3030 int log_transid = 0;
3031 struct btrfs_log_ctx root_log_ctx;
3032 struct blk_plug plug;
3033
3034 mutex_lock(&root->log_mutex);
3035 log_transid = ctx->log_transid;
3036 if (root->log_transid_committed >= log_transid) {
3037 mutex_unlock(&root->log_mutex);
3038 return ctx->log_ret;
3039 }
3040
3041 index1 = log_transid % 2;
3042 if (atomic_read(&root->log_commit[index1])) {
3043 wait_log_commit(root, log_transid);
3044 mutex_unlock(&root->log_mutex);
3045 return ctx->log_ret;
3046 }
3047 ASSERT(log_transid == root->log_transid);
3048 atomic_set(&root->log_commit[index1], 1);
3049
3050 /* wait for previous tree log sync to complete */
3051 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3052 wait_log_commit(root, log_transid - 1);
3053
3054 while (1) {
3055 int batch = atomic_read(&root->log_batch);
3056 /* when we're on an ssd, just kick the log commit out */
3057 if (!btrfs_test_opt(fs_info, SSD) &&
3058 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3059 mutex_unlock(&root->log_mutex);
3060 schedule_timeout_uninterruptible(1);
3061 mutex_lock(&root->log_mutex);
3062 }
3063 wait_for_writer(root);
3064 if (batch == atomic_read(&root->log_batch))
3065 break;
3066 }
3067
3068 /* bail out if we need to do a full commit */
3069 if (btrfs_need_log_full_commit(fs_info, trans)) {
3070 ret = -EAGAIN;
3071 mutex_unlock(&root->log_mutex);
3072 goto out;
3073 }
3074
3075 if (log_transid % 2 == 0)
3076 mark = EXTENT_DIRTY;
3077 else
3078 mark = EXTENT_NEW;
3079
3080 /* we start IO on all the marked extents here, but we don't actually
3081 * wait for them until later.
3082 */
3083 blk_start_plug(&plug);
3084 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3085 if (ret) {
3086 blk_finish_plug(&plug);
3087 btrfs_abort_transaction(trans, ret);
3088 btrfs_set_log_full_commit(fs_info, trans);
3089 mutex_unlock(&root->log_mutex);
3090 goto out;
3091 }
3092
3093 btrfs_set_root_node(&log->root_item, log->node);
3094
3095 root->log_transid++;
3096 log->log_transid = root->log_transid;
3097 root->log_start_pid = 0;
3098 /*
3099 * IO has been started, blocks of the log tree have WRITTEN flag set
3100 * in their headers. new modifications of the log will be written to
3101 * new positions. so it's safe to allow log writers to go in.
3102 */
3103 mutex_unlock(&root->log_mutex);
3104
3105 btrfs_init_log_ctx(&root_log_ctx, NULL);
3106
3107 mutex_lock(&log_root_tree->log_mutex);
3108 atomic_inc(&log_root_tree->log_batch);
3109 atomic_inc(&log_root_tree->log_writers);
3110
3111 index2 = log_root_tree->log_transid % 2;
3112 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3113 root_log_ctx.log_transid = log_root_tree->log_transid;
3114
3115 mutex_unlock(&log_root_tree->log_mutex);
3116
3117 ret = update_log_root(trans, log);
3118
3119 mutex_lock(&log_root_tree->log_mutex);
3120 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3121 /* atomic_dec_and_test implies a barrier */
3122 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3123 }
3124
3125 if (ret) {
3126 if (!list_empty(&root_log_ctx.list))
3127 list_del_init(&root_log_ctx.list);
3128
3129 blk_finish_plug(&plug);
3130 btrfs_set_log_full_commit(fs_info, trans);
3131
3132 if (ret != -ENOSPC) {
3133 btrfs_abort_transaction(trans, ret);
3134 mutex_unlock(&log_root_tree->log_mutex);
3135 goto out;
3136 }
3137 btrfs_wait_tree_log_extents(log, mark);
3138 mutex_unlock(&log_root_tree->log_mutex);
3139 ret = -EAGAIN;
3140 goto out;
3141 }
3142
3143 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3144 blk_finish_plug(&plug);
3145 list_del_init(&root_log_ctx.list);
3146 mutex_unlock(&log_root_tree->log_mutex);
3147 ret = root_log_ctx.log_ret;
3148 goto out;
3149 }
3150
3151 index2 = root_log_ctx.log_transid % 2;
3152 if (atomic_read(&log_root_tree->log_commit[index2])) {
3153 blk_finish_plug(&plug);
3154 ret = btrfs_wait_tree_log_extents(log, mark);
3155 wait_log_commit(log_root_tree,
3156 root_log_ctx.log_transid);
3157 mutex_unlock(&log_root_tree->log_mutex);
3158 if (!ret)
3159 ret = root_log_ctx.log_ret;
3160 goto out;
3161 }
3162 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3163 atomic_set(&log_root_tree->log_commit[index2], 1);
3164
3165 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3166 wait_log_commit(log_root_tree,
3167 root_log_ctx.log_transid - 1);
3168 }
3169
3170 wait_for_writer(log_root_tree);
3171
3172 /*
3173 * now that we've moved on to the tree of log tree roots,
3174 * check the full commit flag again
3175 */
3176 if (btrfs_need_log_full_commit(fs_info, trans)) {
3177 blk_finish_plug(&plug);
3178 btrfs_wait_tree_log_extents(log, mark);
3179 mutex_unlock(&log_root_tree->log_mutex);
3180 ret = -EAGAIN;
3181 goto out_wake_log_root;
3182 }
3183
3184 ret = btrfs_write_marked_extents(fs_info,
3185 &log_root_tree->dirty_log_pages,
3186 EXTENT_DIRTY | EXTENT_NEW);
3187 blk_finish_plug(&plug);
3188 if (ret) {
3189 btrfs_set_log_full_commit(fs_info, trans);
3190 btrfs_abort_transaction(trans, ret);
3191 mutex_unlock(&log_root_tree->log_mutex);
3192 goto out_wake_log_root;
3193 }
3194 ret = btrfs_wait_tree_log_extents(log, mark);
3195 if (!ret)
3196 ret = btrfs_wait_tree_log_extents(log_root_tree,
3197 EXTENT_NEW | EXTENT_DIRTY);
3198 if (ret) {
3199 btrfs_set_log_full_commit(fs_info, trans);
3200 mutex_unlock(&log_root_tree->log_mutex);
3201 goto out_wake_log_root;
3202 }
3203
3204 btrfs_set_super_log_root(fs_info->super_for_commit,
3205 log_root_tree->node->start);
3206 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3207 btrfs_header_level(log_root_tree->node));
3208
3209 log_root_tree->log_transid++;
3210 mutex_unlock(&log_root_tree->log_mutex);
3211
3212 /*
3213 * Nobody else is going to jump in and write the ctree
3214 * super here because the log_commit atomic below is protecting
3215 * us. We must be called with a transaction handle pinning
3216 * the running transaction open, so a full commit can't hop
3217 * in and cause problems either.
3218 */
3219 ret = write_all_supers(fs_info, 1);
3220 if (ret) {
3221 btrfs_set_log_full_commit(fs_info, trans);
3222 btrfs_abort_transaction(trans, ret);
3223 goto out_wake_log_root;
3224 }
3225
3226 mutex_lock(&root->log_mutex);
3227 if (root->last_log_commit < log_transid)
3228 root->last_log_commit = log_transid;
3229 mutex_unlock(&root->log_mutex);
3230
3231 out_wake_log_root:
3232 mutex_lock(&log_root_tree->log_mutex);
3233 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3234
3235 log_root_tree->log_transid_committed++;
3236 atomic_set(&log_root_tree->log_commit[index2], 0);
3237 mutex_unlock(&log_root_tree->log_mutex);
3238
3239 /*
3240 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3241 * all the updates above are seen by the woken threads. It might not be
3242 * necessary, but proving that seems to be hard.
3243 */
3244 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3245 out:
3246 mutex_lock(&root->log_mutex);
3247 btrfs_remove_all_log_ctxs(root, index1, ret);
3248 root->log_transid_committed++;
3249 atomic_set(&root->log_commit[index1], 0);
3250 mutex_unlock(&root->log_mutex);
3251
3252 /*
3253 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3254 * all the updates above are seen by the woken threads. It might not be
3255 * necessary, but proving that seems to be hard.
3256 */
3257 cond_wake_up(&root->log_commit_wait[index1]);
3258 return ret;
3259 }
3260
3261 static void free_log_tree(struct btrfs_trans_handle *trans,
3262 struct btrfs_root *log)
3263 {
3264 int ret;
3265 struct walk_control wc = {
3266 .free = 1,
3267 .process_func = process_one_buffer
3268 };
3269
3270 ret = walk_log_tree(trans, log, &wc);
3271 if (ret) {
3272 if (trans)
3273 btrfs_abort_transaction(trans, ret);
3274 else
3275 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3276 }
3277
3278 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3279 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3280 free_extent_buffer(log->node);
3281 kfree(log);
3282 }
3283
3284 /*
3285 * free all the extents used by the tree log. This should be called
3286 * at commit time of the full transaction
3287 */
3288 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3289 {
3290 if (root->log_root) {
3291 free_log_tree(trans, root->log_root);
3292 root->log_root = NULL;
3293 }
3294 return 0;
3295 }
3296
3297 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3298 struct btrfs_fs_info *fs_info)
3299 {
3300 if (fs_info->log_root_tree) {
3301 free_log_tree(trans, fs_info->log_root_tree);
3302 fs_info->log_root_tree = NULL;
3303 }
3304 return 0;
3305 }
3306
3307 /*
3308 * If both a file and directory are logged, and unlinks or renames are
3309 * mixed in, we have a few interesting corners:
3310 *
3311 * create file X in dir Y
3312 * link file X to X.link in dir Y
3313 * fsync file X
3314 * unlink file X but leave X.link
3315 * fsync dir Y
3316 *
3317 * After a crash we would expect only X.link to exist. But file X
3318 * didn't get fsync'd again so the log has back refs for X and X.link.
3319 *
3320 * We solve this by removing directory entries and inode backrefs from the
3321 * log when a file that was logged in the current transaction is
3322 * unlinked. Any later fsync will include the updated log entries, and
3323 * we'll be able to reconstruct the proper directory items from backrefs.
3324 *
3325 * This optimizations allows us to avoid relogging the entire inode
3326 * or the entire directory.
3327 */
3328 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root,
3330 const char *name, int name_len,
3331 struct btrfs_inode *dir, u64 index)
3332 {
3333 struct btrfs_root *log;
3334 struct btrfs_dir_item *di;
3335 struct btrfs_path *path;
3336 int ret;
3337 int err = 0;
3338 int bytes_del = 0;
3339 u64 dir_ino = btrfs_ino(dir);
3340
3341 if (dir->logged_trans < trans->transid)
3342 return 0;
3343
3344 ret = join_running_log_trans(root);
3345 if (ret)
3346 return 0;
3347
3348 mutex_lock(&dir->log_mutex);
3349
3350 log = root->log_root;
3351 path = btrfs_alloc_path();
3352 if (!path) {
3353 err = -ENOMEM;
3354 goto out_unlock;
3355 }
3356
3357 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3358 name, name_len, -1);
3359 if (IS_ERR(di)) {
3360 err = PTR_ERR(di);
3361 goto fail;
3362 }
3363 if (di) {
3364 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3365 bytes_del += name_len;
3366 if (ret) {
3367 err = ret;
3368 goto fail;
3369 }
3370 }
3371 btrfs_release_path(path);
3372 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3373 index, name, name_len, -1);
3374 if (IS_ERR(di)) {
3375 err = PTR_ERR(di);
3376 goto fail;
3377 }
3378 if (di) {
3379 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3380 bytes_del += name_len;
3381 if (ret) {
3382 err = ret;
3383 goto fail;
3384 }
3385 }
3386
3387 /* update the directory size in the log to reflect the names
3388 * we have removed
3389 */
3390 if (bytes_del) {
3391 struct btrfs_key key;
3392
3393 key.objectid = dir_ino;
3394 key.offset = 0;
3395 key.type = BTRFS_INODE_ITEM_KEY;
3396 btrfs_release_path(path);
3397
3398 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3399 if (ret < 0) {
3400 err = ret;
3401 goto fail;
3402 }
3403 if (ret == 0) {
3404 struct btrfs_inode_item *item;
3405 u64 i_size;
3406
3407 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3408 struct btrfs_inode_item);
3409 i_size = btrfs_inode_size(path->nodes[0], item);
3410 if (i_size > bytes_del)
3411 i_size -= bytes_del;
3412 else
3413 i_size = 0;
3414 btrfs_set_inode_size(path->nodes[0], item, i_size);
3415 btrfs_mark_buffer_dirty(path->nodes[0]);
3416 } else
3417 ret = 0;
3418 btrfs_release_path(path);
3419 }
3420 fail:
3421 btrfs_free_path(path);
3422 out_unlock:
3423 mutex_unlock(&dir->log_mutex);
3424 if (ret == -ENOSPC) {
3425 btrfs_set_log_full_commit(root->fs_info, trans);
3426 ret = 0;
3427 } else if (ret < 0)
3428 btrfs_abort_transaction(trans, ret);
3429
3430 btrfs_end_log_trans(root);
3431
3432 return err;
3433 }
3434
3435 /* see comments for btrfs_del_dir_entries_in_log */
3436 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3437 struct btrfs_root *root,
3438 const char *name, int name_len,
3439 struct btrfs_inode *inode, u64 dirid)
3440 {
3441 struct btrfs_fs_info *fs_info = root->fs_info;
3442 struct btrfs_root *log;
3443 u64 index;
3444 int ret;
3445
3446 if (inode->logged_trans < trans->transid)
3447 return 0;
3448
3449 ret = join_running_log_trans(root);
3450 if (ret)
3451 return 0;
3452 log = root->log_root;
3453 mutex_lock(&inode->log_mutex);
3454
3455 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3456 dirid, &index);
3457 mutex_unlock(&inode->log_mutex);
3458 if (ret == -ENOSPC) {
3459 btrfs_set_log_full_commit(fs_info, trans);
3460 ret = 0;
3461 } else if (ret < 0 && ret != -ENOENT)
3462 btrfs_abort_transaction(trans, ret);
3463 btrfs_end_log_trans(root);
3464
3465 return ret;
3466 }
3467
3468 /*
3469 * creates a range item in the log for 'dirid'. first_offset and
3470 * last_offset tell us which parts of the key space the log should
3471 * be considered authoritative for.
3472 */
3473 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3474 struct btrfs_root *log,
3475 struct btrfs_path *path,
3476 int key_type, u64 dirid,
3477 u64 first_offset, u64 last_offset)
3478 {
3479 int ret;
3480 struct btrfs_key key;
3481 struct btrfs_dir_log_item *item;
3482
3483 key.objectid = dirid;
3484 key.offset = first_offset;
3485 if (key_type == BTRFS_DIR_ITEM_KEY)
3486 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3487 else
3488 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3489 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3490 if (ret)
3491 return ret;
3492
3493 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3494 struct btrfs_dir_log_item);
3495 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3496 btrfs_mark_buffer_dirty(path->nodes[0]);
3497 btrfs_release_path(path);
3498 return 0;
3499 }
3500
3501 /*
3502 * log all the items included in the current transaction for a given
3503 * directory. This also creates the range items in the log tree required
3504 * to replay anything deleted before the fsync
3505 */
3506 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3507 struct btrfs_root *root, struct btrfs_inode *inode,
3508 struct btrfs_path *path,
3509 struct btrfs_path *dst_path, int key_type,
3510 struct btrfs_log_ctx *ctx,
3511 u64 min_offset, u64 *last_offset_ret)
3512 {
3513 struct btrfs_key min_key;
3514 struct btrfs_root *log = root->log_root;
3515 struct extent_buffer *src;
3516 int err = 0;
3517 int ret;
3518 int i;
3519 int nritems;
3520 u64 first_offset = min_offset;
3521 u64 last_offset = (u64)-1;
3522 u64 ino = btrfs_ino(inode);
3523
3524 log = root->log_root;
3525
3526 min_key.objectid = ino;
3527 min_key.type = key_type;
3528 min_key.offset = min_offset;
3529
3530 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3531
3532 /*
3533 * we didn't find anything from this transaction, see if there
3534 * is anything at all
3535 */
3536 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3537 min_key.objectid = ino;
3538 min_key.type = key_type;
3539 min_key.offset = (u64)-1;
3540 btrfs_release_path(path);
3541 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3542 if (ret < 0) {
3543 btrfs_release_path(path);
3544 return ret;
3545 }
3546 ret = btrfs_previous_item(root, path, ino, key_type);
3547
3548 /* if ret == 0 there are items for this type,
3549 * create a range to tell us the last key of this type.
3550 * otherwise, there are no items in this directory after
3551 * *min_offset, and we create a range to indicate that.
3552 */
3553 if (ret == 0) {
3554 struct btrfs_key tmp;
3555 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3556 path->slots[0]);
3557 if (key_type == tmp.type)
3558 first_offset = max(min_offset, tmp.offset) + 1;
3559 }
3560 goto done;
3561 }
3562
3563 /* go backward to find any previous key */
3564 ret = btrfs_previous_item(root, path, ino, key_type);
3565 if (ret == 0) {
3566 struct btrfs_key tmp;
3567 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3568 if (key_type == tmp.type) {
3569 first_offset = tmp.offset;
3570 ret = overwrite_item(trans, log, dst_path,
3571 path->nodes[0], path->slots[0],
3572 &tmp);
3573 if (ret) {
3574 err = ret;
3575 goto done;
3576 }
3577 }
3578 }
3579 btrfs_release_path(path);
3580
3581 /* find the first key from this transaction again */
3582 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3583 if (WARN_ON(ret != 0))
3584 goto done;
3585
3586 /*
3587 * we have a block from this transaction, log every item in it
3588 * from our directory
3589 */
3590 while (1) {
3591 struct btrfs_key tmp;
3592 src = path->nodes[0];
3593 nritems = btrfs_header_nritems(src);
3594 for (i = path->slots[0]; i < nritems; i++) {
3595 struct btrfs_dir_item *di;
3596
3597 btrfs_item_key_to_cpu(src, &min_key, i);
3598
3599 if (min_key.objectid != ino || min_key.type != key_type)
3600 goto done;
3601 ret = overwrite_item(trans, log, dst_path, src, i,
3602 &min_key);
3603 if (ret) {
3604 err = ret;
3605 goto done;
3606 }
3607
3608 /*
3609 * We must make sure that when we log a directory entry,
3610 * the corresponding inode, after log replay, has a
3611 * matching link count. For example:
3612 *
3613 * touch foo
3614 * mkdir mydir
3615 * sync
3616 * ln foo mydir/bar
3617 * xfs_io -c "fsync" mydir
3618 * <crash>
3619 * <mount fs and log replay>
3620 *
3621 * Would result in a fsync log that when replayed, our
3622 * file inode would have a link count of 1, but we get
3623 * two directory entries pointing to the same inode.
3624 * After removing one of the names, it would not be
3625 * possible to remove the other name, which resulted
3626 * always in stale file handle errors, and would not
3627 * be possible to rmdir the parent directory, since
3628 * its i_size could never decrement to the value
3629 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3630 */
3631 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3632 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3633 if (ctx &&
3634 (btrfs_dir_transid(src, di) == trans->transid ||
3635 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3636 tmp.type != BTRFS_ROOT_ITEM_KEY)
3637 ctx->log_new_dentries = true;
3638 }
3639 path->slots[0] = nritems;
3640
3641 /*
3642 * look ahead to the next item and see if it is also
3643 * from this directory and from this transaction
3644 */
3645 ret = btrfs_next_leaf(root, path);
3646 if (ret) {
3647 if (ret == 1)
3648 last_offset = (u64)-1;
3649 else
3650 err = ret;
3651 goto done;
3652 }
3653 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3654 if (tmp.objectid != ino || tmp.type != key_type) {
3655 last_offset = (u64)-1;
3656 goto done;
3657 }
3658 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3659 ret = overwrite_item(trans, log, dst_path,
3660 path->nodes[0], path->slots[0],
3661 &tmp);
3662 if (ret)
3663 err = ret;
3664 else
3665 last_offset = tmp.offset;
3666 goto done;
3667 }
3668 }
3669 done:
3670 btrfs_release_path(path);
3671 btrfs_release_path(dst_path);
3672
3673 if (err == 0) {
3674 *last_offset_ret = last_offset;
3675 /*
3676 * insert the log range keys to indicate where the log
3677 * is valid
3678 */
3679 ret = insert_dir_log_key(trans, log, path, key_type,
3680 ino, first_offset, last_offset);
3681 if (ret)
3682 err = ret;
3683 }
3684 return err;
3685 }
3686
3687 /*
3688 * logging directories is very similar to logging inodes, We find all the items
3689 * from the current transaction and write them to the log.
3690 *
3691 * The recovery code scans the directory in the subvolume, and if it finds a
3692 * key in the range logged that is not present in the log tree, then it means
3693 * that dir entry was unlinked during the transaction.
3694 *
3695 * In order for that scan to work, we must include one key smaller than
3696 * the smallest logged by this transaction and one key larger than the largest
3697 * key logged by this transaction.
3698 */
3699 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3700 struct btrfs_root *root, struct btrfs_inode *inode,
3701 struct btrfs_path *path,
3702 struct btrfs_path *dst_path,
3703 struct btrfs_log_ctx *ctx)
3704 {
3705 u64 min_key;
3706 u64 max_key;
3707 int ret;
3708 int key_type = BTRFS_DIR_ITEM_KEY;
3709
3710 again:
3711 min_key = 0;
3712 max_key = 0;
3713 while (1) {
3714 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3715 ctx, min_key, &max_key);
3716 if (ret)
3717 return ret;
3718 if (max_key == (u64)-1)
3719 break;
3720 min_key = max_key + 1;
3721 }
3722
3723 if (key_type == BTRFS_DIR_ITEM_KEY) {
3724 key_type = BTRFS_DIR_INDEX_KEY;
3725 goto again;
3726 }
3727 return 0;
3728 }
3729
3730 /*
3731 * a helper function to drop items from the log before we relog an
3732 * inode. max_key_type indicates the highest item type to remove.
3733 * This cannot be run for file data extents because it does not
3734 * free the extents they point to.
3735 */
3736 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3737 struct btrfs_root *log,
3738 struct btrfs_path *path,
3739 u64 objectid, int max_key_type)
3740 {
3741 int ret;
3742 struct btrfs_key key;
3743 struct btrfs_key found_key;
3744 int start_slot;
3745
3746 key.objectid = objectid;
3747 key.type = max_key_type;
3748 key.offset = (u64)-1;
3749
3750 while (1) {
3751 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3752 BUG_ON(ret == 0); /* Logic error */
3753 if (ret < 0)
3754 break;
3755
3756 if (path->slots[0] == 0)
3757 break;
3758
3759 path->slots[0]--;
3760 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3761 path->slots[0]);
3762
3763 if (found_key.objectid != objectid)
3764 break;
3765
3766 found_key.offset = 0;
3767 found_key.type = 0;
3768 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3769 &start_slot);
3770 if (ret < 0)
3771 break;
3772
3773 ret = btrfs_del_items(trans, log, path, start_slot,
3774 path->slots[0] - start_slot + 1);
3775 /*
3776 * If start slot isn't 0 then we don't need to re-search, we've
3777 * found the last guy with the objectid in this tree.
3778 */
3779 if (ret || start_slot != 0)
3780 break;
3781 btrfs_release_path(path);
3782 }
3783 btrfs_release_path(path);
3784 if (ret > 0)
3785 ret = 0;
3786 return ret;
3787 }
3788
3789 static void fill_inode_item(struct btrfs_trans_handle *trans,
3790 struct extent_buffer *leaf,
3791 struct btrfs_inode_item *item,
3792 struct inode *inode, int log_inode_only,
3793 u64 logged_isize)
3794 {
3795 struct btrfs_map_token token;
3796
3797 btrfs_init_map_token(&token);
3798
3799 if (log_inode_only) {
3800 /* set the generation to zero so the recover code
3801 * can tell the difference between an logging
3802 * just to say 'this inode exists' and a logging
3803 * to say 'update this inode with these values'
3804 */
3805 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3806 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3807 } else {
3808 btrfs_set_token_inode_generation(leaf, item,
3809 BTRFS_I(inode)->generation,
3810 &token);
3811 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3812 }
3813
3814 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3815 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3816 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3817 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3818
3819 btrfs_set_token_timespec_sec(leaf, &item->atime,
3820 inode->i_atime.tv_sec, &token);
3821 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3822 inode->i_atime.tv_nsec, &token);
3823
3824 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3825 inode->i_mtime.tv_sec, &token);
3826 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3827 inode->i_mtime.tv_nsec, &token);
3828
3829 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3830 inode->i_ctime.tv_sec, &token);
3831 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3832 inode->i_ctime.tv_nsec, &token);
3833
3834 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3835 &token);
3836
3837 btrfs_set_token_inode_sequence(leaf, item,
3838 inode_peek_iversion(inode), &token);
3839 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3840 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3841 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3842 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3843 }
3844
3845 static int log_inode_item(struct btrfs_trans_handle *trans,
3846 struct btrfs_root *log, struct btrfs_path *path,
3847 struct btrfs_inode *inode)
3848 {
3849 struct btrfs_inode_item *inode_item;
3850 int ret;
3851
3852 ret = btrfs_insert_empty_item(trans, log, path,
3853 &inode->location, sizeof(*inode_item));
3854 if (ret && ret != -EEXIST)
3855 return ret;
3856 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3857 struct btrfs_inode_item);
3858 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3859 0, 0);
3860 btrfs_release_path(path);
3861 return 0;
3862 }
3863
3864 static noinline int copy_items(struct btrfs_trans_handle *trans,
3865 struct btrfs_inode *inode,
3866 struct btrfs_path *dst_path,
3867 struct btrfs_path *src_path, u64 *last_extent,
3868 int start_slot, int nr, int inode_only,
3869 u64 logged_isize)
3870 {
3871 struct btrfs_fs_info *fs_info = trans->fs_info;
3872 unsigned long src_offset;
3873 unsigned long dst_offset;
3874 struct btrfs_root *log = inode->root->log_root;
3875 struct btrfs_file_extent_item *extent;
3876 struct btrfs_inode_item *inode_item;
3877 struct extent_buffer *src = src_path->nodes[0];
3878 struct btrfs_key first_key, last_key, key;
3879 int ret;
3880 struct btrfs_key *ins_keys;
3881 u32 *ins_sizes;
3882 char *ins_data;
3883 int i;
3884 struct list_head ordered_sums;
3885 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3886 bool has_extents = false;
3887 bool need_find_last_extent = true;
3888 bool done = false;
3889
3890 INIT_LIST_HEAD(&ordered_sums);
3891
3892 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3893 nr * sizeof(u32), GFP_NOFS);
3894 if (!ins_data)
3895 return -ENOMEM;
3896
3897 first_key.objectid = (u64)-1;
3898
3899 ins_sizes = (u32 *)ins_data;
3900 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3901
3902 for (i = 0; i < nr; i++) {
3903 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3904 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3905 }
3906 ret = btrfs_insert_empty_items(trans, log, dst_path,
3907 ins_keys, ins_sizes, nr);
3908 if (ret) {
3909 kfree(ins_data);
3910 return ret;
3911 }
3912
3913 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3914 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3915 dst_path->slots[0]);
3916
3917 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3918
3919 if (i == nr - 1)
3920 last_key = ins_keys[i];
3921
3922 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3923 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3924 dst_path->slots[0],
3925 struct btrfs_inode_item);
3926 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3927 &inode->vfs_inode,
3928 inode_only == LOG_INODE_EXISTS,
3929 logged_isize);
3930 } else {
3931 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3932 src_offset, ins_sizes[i]);
3933 }
3934
3935 /*
3936 * We set need_find_last_extent here in case we know we were
3937 * processing other items and then walk into the first extent in
3938 * the inode. If we don't hit an extent then nothing changes,
3939 * we'll do the last search the next time around.
3940 */
3941 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3942 has_extents = true;
3943 if (first_key.objectid == (u64)-1)
3944 first_key = ins_keys[i];
3945 } else {
3946 need_find_last_extent = false;
3947 }
3948
3949 /* take a reference on file data extents so that truncates
3950 * or deletes of this inode don't have to relog the inode
3951 * again
3952 */
3953 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3954 !skip_csum) {
3955 int found_type;
3956 extent = btrfs_item_ptr(src, start_slot + i,
3957 struct btrfs_file_extent_item);
3958
3959 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3960 continue;
3961
3962 found_type = btrfs_file_extent_type(src, extent);
3963 if (found_type == BTRFS_FILE_EXTENT_REG) {
3964 u64 ds, dl, cs, cl;
3965 ds = btrfs_file_extent_disk_bytenr(src,
3966 extent);
3967 /* ds == 0 is a hole */
3968 if (ds == 0)
3969 continue;
3970
3971 dl = btrfs_file_extent_disk_num_bytes(src,
3972 extent);
3973 cs = btrfs_file_extent_offset(src, extent);
3974 cl = btrfs_file_extent_num_bytes(src,
3975 extent);
3976 if (btrfs_file_extent_compression(src,
3977 extent)) {
3978 cs = 0;
3979 cl = dl;
3980 }
3981
3982 ret = btrfs_lookup_csums_range(
3983 fs_info->csum_root,
3984 ds + cs, ds + cs + cl - 1,
3985 &ordered_sums, 0);
3986 if (ret) {
3987 btrfs_release_path(dst_path);
3988 kfree(ins_data);
3989 return ret;
3990 }
3991 }
3992 }
3993 }
3994
3995 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3996 btrfs_release_path(dst_path);
3997 kfree(ins_data);
3998
3999 /*
4000 * we have to do this after the loop above to avoid changing the
4001 * log tree while trying to change the log tree.
4002 */
4003 ret = 0;
4004 while (!list_empty(&ordered_sums)) {
4005 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4006 struct btrfs_ordered_sum,
4007 list);
4008 if (!ret)
4009 ret = btrfs_csum_file_blocks(trans, log, sums);
4010 list_del(&sums->list);
4011 kfree(sums);
4012 }
4013
4014 if (!has_extents)
4015 return ret;
4016
4017 if (need_find_last_extent && *last_extent == first_key.offset) {
4018 /*
4019 * We don't have any leafs between our current one and the one
4020 * we processed before that can have file extent items for our
4021 * inode (and have a generation number smaller than our current
4022 * transaction id).
4023 */
4024 need_find_last_extent = false;
4025 }
4026
4027 /*
4028 * Because we use btrfs_search_forward we could skip leaves that were
4029 * not modified and then assume *last_extent is valid when it really
4030 * isn't. So back up to the previous leaf and read the end of the last
4031 * extent before we go and fill in holes.
4032 */
4033 if (need_find_last_extent) {
4034 u64 len;
4035
4036 ret = btrfs_prev_leaf(inode->root, src_path);
4037 if (ret < 0)
4038 return ret;
4039 if (ret)
4040 goto fill_holes;
4041 if (src_path->slots[0])
4042 src_path->slots[0]--;
4043 src = src_path->nodes[0];
4044 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
4045 if (key.objectid != btrfs_ino(inode) ||
4046 key.type != BTRFS_EXTENT_DATA_KEY)
4047 goto fill_holes;
4048 extent = btrfs_item_ptr(src, src_path->slots[0],
4049 struct btrfs_file_extent_item);
4050 if (btrfs_file_extent_type(src, extent) ==
4051 BTRFS_FILE_EXTENT_INLINE) {
4052 len = btrfs_file_extent_ram_bytes(src, extent);
4053 *last_extent = ALIGN(key.offset + len,
4054 fs_info->sectorsize);
4055 } else {
4056 len = btrfs_file_extent_num_bytes(src, extent);
4057 *last_extent = key.offset + len;
4058 }
4059 }
4060 fill_holes:
4061 /* So we did prev_leaf, now we need to move to the next leaf, but a few
4062 * things could have happened
4063 *
4064 * 1) A merge could have happened, so we could currently be on a leaf
4065 * that holds what we were copying in the first place.
4066 * 2) A split could have happened, and now not all of the items we want
4067 * are on the same leaf.
4068 *
4069 * So we need to adjust how we search for holes, we need to drop the
4070 * path and re-search for the first extent key we found, and then walk
4071 * forward until we hit the last one we copied.
4072 */
4073 if (need_find_last_extent) {
4074 /* btrfs_prev_leaf could return 1 without releasing the path */
4075 btrfs_release_path(src_path);
4076 ret = btrfs_search_slot(NULL, inode->root, &first_key,
4077 src_path, 0, 0);
4078 if (ret < 0)
4079 return ret;
4080 ASSERT(ret == 0);
4081 src = src_path->nodes[0];
4082 i = src_path->slots[0];
4083 } else {
4084 i = start_slot;
4085 }
4086
4087 /*
4088 * Ok so here we need to go through and fill in any holes we may have
4089 * to make sure that holes are punched for those areas in case they had
4090 * extents previously.
4091 */
4092 while (!done) {
4093 u64 offset, len;
4094 u64 extent_end;
4095
4096 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
4097 ret = btrfs_next_leaf(inode->root, src_path);
4098 if (ret < 0)
4099 return ret;
4100 ASSERT(ret == 0);
4101 src = src_path->nodes[0];
4102 i = 0;
4103 need_find_last_extent = true;
4104 }
4105
4106 btrfs_item_key_to_cpu(src, &key, i);
4107 if (!btrfs_comp_cpu_keys(&key, &last_key))
4108 done = true;
4109 if (key.objectid != btrfs_ino(inode) ||
4110 key.type != BTRFS_EXTENT_DATA_KEY) {
4111 i++;
4112 continue;
4113 }
4114 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
4115 if (btrfs_file_extent_type(src, extent) ==
4116 BTRFS_FILE_EXTENT_INLINE) {
4117 len = btrfs_file_extent_ram_bytes(src, extent);
4118 extent_end = ALIGN(key.offset + len,
4119 fs_info->sectorsize);
4120 } else {
4121 len = btrfs_file_extent_num_bytes(src, extent);
4122 extent_end = key.offset + len;
4123 }
4124 i++;
4125
4126 if (*last_extent == key.offset) {
4127 *last_extent = extent_end;
4128 continue;
4129 }
4130 offset = *last_extent;
4131 len = key.offset - *last_extent;
4132 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4133 offset, 0, 0, len, 0, len, 0, 0, 0);
4134 if (ret)
4135 break;
4136 *last_extent = extent_end;
4137 }
4138
4139 /*
4140 * Check if there is a hole between the last extent found in our leaf
4141 * and the first extent in the next leaf. If there is one, we need to
4142 * log an explicit hole so that at replay time we can punch the hole.
4143 */
4144 if (ret == 0 &&
4145 key.objectid == btrfs_ino(inode) &&
4146 key.type == BTRFS_EXTENT_DATA_KEY &&
4147 i == btrfs_header_nritems(src_path->nodes[0])) {
4148 ret = btrfs_next_leaf(inode->root, src_path);
4149 need_find_last_extent = true;
4150 if (ret > 0) {
4151 ret = 0;
4152 } else if (ret == 0) {
4153 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4154 src_path->slots[0]);
4155 if (key.objectid == btrfs_ino(inode) &&
4156 key.type == BTRFS_EXTENT_DATA_KEY &&
4157 *last_extent < key.offset) {
4158 const u64 len = key.offset - *last_extent;
4159
4160 ret = btrfs_insert_file_extent(trans, log,
4161 btrfs_ino(inode),
4162 *last_extent, 0,
4163 0, len, 0, len,
4164 0, 0, 0);
4165 }
4166 }
4167 }
4168 /*
4169 * Need to let the callers know we dropped the path so they should
4170 * re-search.
4171 */
4172 if (!ret && need_find_last_extent)
4173 ret = 1;
4174 return ret;
4175 }
4176
4177 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4178 {
4179 struct extent_map *em1, *em2;
4180
4181 em1 = list_entry(a, struct extent_map, list);
4182 em2 = list_entry(b, struct extent_map, list);
4183
4184 if (em1->start < em2->start)
4185 return -1;
4186 else if (em1->start > em2->start)
4187 return 1;
4188 return 0;
4189 }
4190
4191 static int log_extent_csums(struct btrfs_trans_handle *trans,
4192 struct btrfs_inode *inode,
4193 struct btrfs_root *log_root,
4194 const struct extent_map *em)
4195 {
4196 u64 csum_offset;
4197 u64 csum_len;
4198 LIST_HEAD(ordered_sums);
4199 int ret = 0;
4200
4201 if (inode->flags & BTRFS_INODE_NODATASUM ||
4202 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4203 em->block_start == EXTENT_MAP_HOLE)
4204 return 0;
4205
4206 /* If we're compressed we have to save the entire range of csums. */
4207 if (em->compress_type) {
4208 csum_offset = 0;
4209 csum_len = max(em->block_len, em->orig_block_len);
4210 } else {
4211 csum_offset = em->mod_start - em->start;
4212 csum_len = em->mod_len;
4213 }
4214
4215 /* block start is already adjusted for the file extent offset. */
4216 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4217 em->block_start + csum_offset,
4218 em->block_start + csum_offset +
4219 csum_len - 1, &ordered_sums, 0);
4220 if (ret)
4221 return ret;
4222
4223 while (!list_empty(&ordered_sums)) {
4224 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4225 struct btrfs_ordered_sum,
4226 list);
4227 if (!ret)
4228 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4229 list_del(&sums->list);
4230 kfree(sums);
4231 }
4232
4233 return ret;
4234 }
4235
4236 static int log_one_extent(struct btrfs_trans_handle *trans,
4237 struct btrfs_inode *inode, struct btrfs_root *root,
4238 const struct extent_map *em,
4239 struct btrfs_path *path,
4240 struct btrfs_log_ctx *ctx)
4241 {
4242 struct btrfs_root *log = root->log_root;
4243 struct btrfs_file_extent_item *fi;
4244 struct extent_buffer *leaf;
4245 struct btrfs_map_token token;
4246 struct btrfs_key key;
4247 u64 extent_offset = em->start - em->orig_start;
4248 u64 block_len;
4249 int ret;
4250 int extent_inserted = 0;
4251
4252 ret = log_extent_csums(trans, inode, log, em);
4253 if (ret)
4254 return ret;
4255
4256 btrfs_init_map_token(&token);
4257
4258 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4259 em->start + em->len, NULL, 0, 1,
4260 sizeof(*fi), &extent_inserted);
4261 if (ret)
4262 return ret;
4263
4264 if (!extent_inserted) {
4265 key.objectid = btrfs_ino(inode);
4266 key.type = BTRFS_EXTENT_DATA_KEY;
4267 key.offset = em->start;
4268
4269 ret = btrfs_insert_empty_item(trans, log, path, &key,
4270 sizeof(*fi));
4271 if (ret)
4272 return ret;
4273 }
4274 leaf = path->nodes[0];
4275 fi = btrfs_item_ptr(leaf, path->slots[0],
4276 struct btrfs_file_extent_item);
4277
4278 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4279 &token);
4280 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4281 btrfs_set_token_file_extent_type(leaf, fi,
4282 BTRFS_FILE_EXTENT_PREALLOC,
4283 &token);
4284 else
4285 btrfs_set_token_file_extent_type(leaf, fi,
4286 BTRFS_FILE_EXTENT_REG,
4287 &token);
4288
4289 block_len = max(em->block_len, em->orig_block_len);
4290 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4291 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4292 em->block_start,
4293 &token);
4294 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4295 &token);
4296 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4297 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4298 em->block_start -
4299 extent_offset, &token);
4300 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4301 &token);
4302 } else {
4303 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4304 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4305 &token);
4306 }
4307
4308 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4309 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4310 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4311 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4312 &token);
4313 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4314 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4315 btrfs_mark_buffer_dirty(leaf);
4316
4317 btrfs_release_path(path);
4318
4319 return ret;
4320 }
4321
4322 /*
4323 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4324 * lose them after doing a fast fsync and replaying the log. We scan the
4325 * subvolume's root instead of iterating the inode's extent map tree because
4326 * otherwise we can log incorrect extent items based on extent map conversion.
4327 * That can happen due to the fact that extent maps are merged when they
4328 * are not in the extent map tree's list of modified extents.
4329 */
4330 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4331 struct btrfs_inode *inode,
4332 struct btrfs_path *path)
4333 {
4334 struct btrfs_root *root = inode->root;
4335 struct btrfs_key key;
4336 const u64 i_size = i_size_read(&inode->vfs_inode);
4337 const u64 ino = btrfs_ino(inode);
4338 struct btrfs_path *dst_path = NULL;
4339 u64 last_extent = (u64)-1;
4340 int ins_nr = 0;
4341 int start_slot;
4342 int ret;
4343
4344 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4345 return 0;
4346
4347 key.objectid = ino;
4348 key.type = BTRFS_EXTENT_DATA_KEY;
4349 key.offset = i_size;
4350 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4351 if (ret < 0)
4352 goto out;
4353
4354 while (true) {
4355 struct extent_buffer *leaf = path->nodes[0];
4356 int slot = path->slots[0];
4357
4358 if (slot >= btrfs_header_nritems(leaf)) {
4359 if (ins_nr > 0) {
4360 ret = copy_items(trans, inode, dst_path, path,
4361 &last_extent, start_slot,
4362 ins_nr, 1, 0);
4363 if (ret < 0)
4364 goto out;
4365 ins_nr = 0;
4366 }
4367 ret = btrfs_next_leaf(root, path);
4368 if (ret < 0)
4369 goto out;
4370 if (ret > 0) {
4371 ret = 0;
4372 break;
4373 }
4374 continue;
4375 }
4376
4377 btrfs_item_key_to_cpu(leaf, &key, slot);
4378 if (key.objectid > ino)
4379 break;
4380 if (WARN_ON_ONCE(key.objectid < ino) ||
4381 key.type < BTRFS_EXTENT_DATA_KEY ||
4382 key.offset < i_size) {
4383 path->slots[0]++;
4384 continue;
4385 }
4386 if (last_extent == (u64)-1) {
4387 last_extent = key.offset;
4388 /*
4389 * Avoid logging extent items logged in past fsync calls
4390 * and leading to duplicate keys in the log tree.
4391 */
4392 do {
4393 ret = btrfs_truncate_inode_items(trans,
4394 root->log_root,
4395 &inode->vfs_inode,
4396 i_size,
4397 BTRFS_EXTENT_DATA_KEY);
4398 } while (ret == -EAGAIN);
4399 if (ret)
4400 goto out;
4401 }
4402 if (ins_nr == 0)
4403 start_slot = slot;
4404 ins_nr++;
4405 path->slots[0]++;
4406 if (!dst_path) {
4407 dst_path = btrfs_alloc_path();
4408 if (!dst_path) {
4409 ret = -ENOMEM;
4410 goto out;
4411 }
4412 }
4413 }
4414 if (ins_nr > 0) {
4415 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4416 start_slot, ins_nr, 1, 0);
4417 if (ret > 0)
4418 ret = 0;
4419 }
4420 out:
4421 btrfs_release_path(path);
4422 btrfs_free_path(dst_path);
4423 return ret;
4424 }
4425
4426 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4427 struct btrfs_root *root,
4428 struct btrfs_inode *inode,
4429 struct btrfs_path *path,
4430 struct btrfs_log_ctx *ctx,
4431 const u64 start,
4432 const u64 end)
4433 {
4434 struct extent_map *em, *n;
4435 struct list_head extents;
4436 struct extent_map_tree *tree = &inode->extent_tree;
4437 u64 test_gen;
4438 int ret = 0;
4439 int num = 0;
4440
4441 INIT_LIST_HEAD(&extents);
4442
4443 write_lock(&tree->lock);
4444 test_gen = root->fs_info->last_trans_committed;
4445
4446 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4447 /*
4448 * Skip extents outside our logging range. It's important to do
4449 * it for correctness because if we don't ignore them, we may
4450 * log them before their ordered extent completes, and therefore
4451 * we could log them without logging their respective checksums
4452 * (the checksum items are added to the csum tree at the very
4453 * end of btrfs_finish_ordered_io()). Also leave such extents
4454 * outside of our range in the list, since we may have another
4455 * ranged fsync in the near future that needs them. If an extent
4456 * outside our range corresponds to a hole, log it to avoid
4457 * leaving gaps between extents (fsck will complain when we are
4458 * not using the NO_HOLES feature).
4459 */
4460 if ((em->start > end || em->start + em->len <= start) &&
4461 em->block_start != EXTENT_MAP_HOLE)
4462 continue;
4463
4464 list_del_init(&em->list);
4465 /*
4466 * Just an arbitrary number, this can be really CPU intensive
4467 * once we start getting a lot of extents, and really once we
4468 * have a bunch of extents we just want to commit since it will
4469 * be faster.
4470 */
4471 if (++num > 32768) {
4472 list_del_init(&tree->modified_extents);
4473 ret = -EFBIG;
4474 goto process;
4475 }
4476
4477 if (em->generation <= test_gen)
4478 continue;
4479
4480 /* We log prealloc extents beyond eof later. */
4481 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4482 em->start >= i_size_read(&inode->vfs_inode))
4483 continue;
4484
4485 /* Need a ref to keep it from getting evicted from cache */
4486 refcount_inc(&em->refs);
4487 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4488 list_add_tail(&em->list, &extents);
4489 num++;
4490 }
4491
4492 list_sort(NULL, &extents, extent_cmp);
4493 process:
4494 while (!list_empty(&extents)) {
4495 em = list_entry(extents.next, struct extent_map, list);
4496
4497 list_del_init(&em->list);
4498
4499 /*
4500 * If we had an error we just need to delete everybody from our
4501 * private list.
4502 */
4503 if (ret) {
4504 clear_em_logging(tree, em);
4505 free_extent_map(em);
4506 continue;
4507 }
4508
4509 write_unlock(&tree->lock);
4510
4511 ret = log_one_extent(trans, inode, root, em, path, ctx);
4512 write_lock(&tree->lock);
4513 clear_em_logging(tree, em);
4514 free_extent_map(em);
4515 }
4516 WARN_ON(!list_empty(&extents));
4517 write_unlock(&tree->lock);
4518
4519 btrfs_release_path(path);
4520 if (!ret)
4521 ret = btrfs_log_prealloc_extents(trans, inode, path);
4522
4523 return ret;
4524 }
4525
4526 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4527 struct btrfs_path *path, u64 *size_ret)
4528 {
4529 struct btrfs_key key;
4530 int ret;
4531
4532 key.objectid = btrfs_ino(inode);
4533 key.type = BTRFS_INODE_ITEM_KEY;
4534 key.offset = 0;
4535
4536 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4537 if (ret < 0) {
4538 return ret;
4539 } else if (ret > 0) {
4540 *size_ret = 0;
4541 } else {
4542 struct btrfs_inode_item *item;
4543
4544 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4545 struct btrfs_inode_item);
4546 *size_ret = btrfs_inode_size(path->nodes[0], item);
4547 }
4548
4549 btrfs_release_path(path);
4550 return 0;
4551 }
4552
4553 /*
4554 * At the moment we always log all xattrs. This is to figure out at log replay
4555 * time which xattrs must have their deletion replayed. If a xattr is missing
4556 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4557 * because if a xattr is deleted, the inode is fsynced and a power failure
4558 * happens, causing the log to be replayed the next time the fs is mounted,
4559 * we want the xattr to not exist anymore (same behaviour as other filesystems
4560 * with a journal, ext3/4, xfs, f2fs, etc).
4561 */
4562 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4563 struct btrfs_root *root,
4564 struct btrfs_inode *inode,
4565 struct btrfs_path *path,
4566 struct btrfs_path *dst_path)
4567 {
4568 int ret;
4569 struct btrfs_key key;
4570 const u64 ino = btrfs_ino(inode);
4571 int ins_nr = 0;
4572 int start_slot = 0;
4573
4574 key.objectid = ino;
4575 key.type = BTRFS_XATTR_ITEM_KEY;
4576 key.offset = 0;
4577
4578 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4579 if (ret < 0)
4580 return ret;
4581
4582 while (true) {
4583 int slot = path->slots[0];
4584 struct extent_buffer *leaf = path->nodes[0];
4585 int nritems = btrfs_header_nritems(leaf);
4586
4587 if (slot >= nritems) {
4588 if (ins_nr > 0) {
4589 u64 last_extent = 0;
4590
4591 ret = copy_items(trans, inode, dst_path, path,
4592 &last_extent, start_slot,
4593 ins_nr, 1, 0);
4594 /* can't be 1, extent items aren't processed */
4595 ASSERT(ret <= 0);
4596 if (ret < 0)
4597 return ret;
4598 ins_nr = 0;
4599 }
4600 ret = btrfs_next_leaf(root, path);
4601 if (ret < 0)
4602 return ret;
4603 else if (ret > 0)
4604 break;
4605 continue;
4606 }
4607
4608 btrfs_item_key_to_cpu(leaf, &key, slot);
4609 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4610 break;
4611
4612 if (ins_nr == 0)
4613 start_slot = slot;
4614 ins_nr++;
4615 path->slots[0]++;
4616 cond_resched();
4617 }
4618 if (ins_nr > 0) {
4619 u64 last_extent = 0;
4620
4621 ret = copy_items(trans, inode, dst_path, path,
4622 &last_extent, start_slot,
4623 ins_nr, 1, 0);
4624 /* can't be 1, extent items aren't processed */
4625 ASSERT(ret <= 0);
4626 if (ret < 0)
4627 return ret;
4628 }
4629
4630 return 0;
4631 }
4632
4633 /*
4634 * If the no holes feature is enabled we need to make sure any hole between the
4635 * last extent and the i_size of our inode is explicitly marked in the log. This
4636 * is to make sure that doing something like:
4637 *
4638 * 1) create file with 128Kb of data
4639 * 2) truncate file to 64Kb
4640 * 3) truncate file to 256Kb
4641 * 4) fsync file
4642 * 5) <crash/power failure>
4643 * 6) mount fs and trigger log replay
4644 *
4645 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4646 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4647 * file correspond to a hole. The presence of explicit holes in a log tree is
4648 * what guarantees that log replay will remove/adjust file extent items in the
4649 * fs/subvol tree.
4650 *
4651 * Here we do not need to care about holes between extents, that is already done
4652 * by copy_items(). We also only need to do this in the full sync path, where we
4653 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4654 * lookup the list of modified extent maps and if any represents a hole, we
4655 * insert a corresponding extent representing a hole in the log tree.
4656 */
4657 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4658 struct btrfs_root *root,
4659 struct btrfs_inode *inode,
4660 struct btrfs_path *path)
4661 {
4662 struct btrfs_fs_info *fs_info = root->fs_info;
4663 int ret;
4664 struct btrfs_key key;
4665 u64 hole_start;
4666 u64 hole_size;
4667 struct extent_buffer *leaf;
4668 struct btrfs_root *log = root->log_root;
4669 const u64 ino = btrfs_ino(inode);
4670 const u64 i_size = i_size_read(&inode->vfs_inode);
4671
4672 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4673 return 0;
4674
4675 key.objectid = ino;
4676 key.type = BTRFS_EXTENT_DATA_KEY;
4677 key.offset = (u64)-1;
4678
4679 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4680 ASSERT(ret != 0);
4681 if (ret < 0)
4682 return ret;
4683
4684 ASSERT(path->slots[0] > 0);
4685 path->slots[0]--;
4686 leaf = path->nodes[0];
4687 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4688
4689 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4690 /* inode does not have any extents */
4691 hole_start = 0;
4692 hole_size = i_size;
4693 } else {
4694 struct btrfs_file_extent_item *extent;
4695 u64 len;
4696
4697 /*
4698 * If there's an extent beyond i_size, an explicit hole was
4699 * already inserted by copy_items().
4700 */
4701 if (key.offset >= i_size)
4702 return 0;
4703
4704 extent = btrfs_item_ptr(leaf, path->slots[0],
4705 struct btrfs_file_extent_item);
4706
4707 if (btrfs_file_extent_type(leaf, extent) ==
4708 BTRFS_FILE_EXTENT_INLINE) {
4709 len = btrfs_file_extent_ram_bytes(leaf, extent);
4710 ASSERT(len == i_size ||
4711 (len == fs_info->sectorsize &&
4712 btrfs_file_extent_compression(leaf, extent) !=
4713 BTRFS_COMPRESS_NONE) ||
4714 (len < i_size && i_size < fs_info->sectorsize));
4715 return 0;
4716 }
4717
4718 len = btrfs_file_extent_num_bytes(leaf, extent);
4719 /* Last extent goes beyond i_size, no need to log a hole. */
4720 if (key.offset + len > i_size)
4721 return 0;
4722 hole_start = key.offset + len;
4723 hole_size = i_size - hole_start;
4724 }
4725 btrfs_release_path(path);
4726
4727 /* Last extent ends at i_size. */
4728 if (hole_size == 0)
4729 return 0;
4730
4731 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4732 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4733 hole_size, 0, hole_size, 0, 0, 0);
4734 return ret;
4735 }
4736
4737 /*
4738 * When we are logging a new inode X, check if it doesn't have a reference that
4739 * matches the reference from some other inode Y created in a past transaction
4740 * and that was renamed in the current transaction. If we don't do this, then at
4741 * log replay time we can lose inode Y (and all its files if it's a directory):
4742 *
4743 * mkdir /mnt/x
4744 * echo "hello world" > /mnt/x/foobar
4745 * sync
4746 * mv /mnt/x /mnt/y
4747 * mkdir /mnt/x # or touch /mnt/x
4748 * xfs_io -c fsync /mnt/x
4749 * <power fail>
4750 * mount fs, trigger log replay
4751 *
4752 * After the log replay procedure, we would lose the first directory and all its
4753 * files (file foobar).
4754 * For the case where inode Y is not a directory we simply end up losing it:
4755 *
4756 * echo "123" > /mnt/foo
4757 * sync
4758 * mv /mnt/foo /mnt/bar
4759 * echo "abc" > /mnt/foo
4760 * xfs_io -c fsync /mnt/foo
4761 * <power fail>
4762 *
4763 * We also need this for cases where a snapshot entry is replaced by some other
4764 * entry (file or directory) otherwise we end up with an unreplayable log due to
4765 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4766 * if it were a regular entry:
4767 *
4768 * mkdir /mnt/x
4769 * btrfs subvolume snapshot /mnt /mnt/x/snap
4770 * btrfs subvolume delete /mnt/x/snap
4771 * rmdir /mnt/x
4772 * mkdir /mnt/x
4773 * fsync /mnt/x or fsync some new file inside it
4774 * <power fail>
4775 *
4776 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4777 * the same transaction.
4778 */
4779 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4780 const int slot,
4781 const struct btrfs_key *key,
4782 struct btrfs_inode *inode,
4783 u64 *other_ino, u64 *other_parent)
4784 {
4785 int ret;
4786 struct btrfs_path *search_path;
4787 char *name = NULL;
4788 u32 name_len = 0;
4789 u32 item_size = btrfs_item_size_nr(eb, slot);
4790 u32 cur_offset = 0;
4791 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4792
4793 search_path = btrfs_alloc_path();
4794 if (!search_path)
4795 return -ENOMEM;
4796 search_path->search_commit_root = 1;
4797 search_path->skip_locking = 1;
4798
4799 while (cur_offset < item_size) {
4800 u64 parent;
4801 u32 this_name_len;
4802 u32 this_len;
4803 unsigned long name_ptr;
4804 struct btrfs_dir_item *di;
4805
4806 if (key->type == BTRFS_INODE_REF_KEY) {
4807 struct btrfs_inode_ref *iref;
4808
4809 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4810 parent = key->offset;
4811 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4812 name_ptr = (unsigned long)(iref + 1);
4813 this_len = sizeof(*iref) + this_name_len;
4814 } else {
4815 struct btrfs_inode_extref *extref;
4816
4817 extref = (struct btrfs_inode_extref *)(ptr +
4818 cur_offset);
4819 parent = btrfs_inode_extref_parent(eb, extref);
4820 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4821 name_ptr = (unsigned long)&extref->name;
4822 this_len = sizeof(*extref) + this_name_len;
4823 }
4824
4825 if (this_name_len > name_len) {
4826 char *new_name;
4827
4828 new_name = krealloc(name, this_name_len, GFP_NOFS);
4829 if (!new_name) {
4830 ret = -ENOMEM;
4831 goto out;
4832 }
4833 name_len = this_name_len;
4834 name = new_name;
4835 }
4836
4837 read_extent_buffer(eb, name, name_ptr, this_name_len);
4838 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4839 parent, name, this_name_len, 0);
4840 if (di && !IS_ERR(di)) {
4841 struct btrfs_key di_key;
4842
4843 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4844 di, &di_key);
4845 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4846 if (di_key.objectid != key->objectid) {
4847 ret = 1;
4848 *other_ino = di_key.objectid;
4849 *other_parent = parent;
4850 } else {
4851 ret = 0;
4852 }
4853 } else {
4854 ret = -EAGAIN;
4855 }
4856 goto out;
4857 } else if (IS_ERR(di)) {
4858 ret = PTR_ERR(di);
4859 goto out;
4860 }
4861 btrfs_release_path(search_path);
4862
4863 cur_offset += this_len;
4864 }
4865 ret = 0;
4866 out:
4867 btrfs_free_path(search_path);
4868 kfree(name);
4869 return ret;
4870 }
4871
4872 struct btrfs_ino_list {
4873 u64 ino;
4874 u64 parent;
4875 struct list_head list;
4876 };
4877
4878 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4879 struct btrfs_root *root,
4880 struct btrfs_path *path,
4881 struct btrfs_log_ctx *ctx,
4882 u64 ino, u64 parent)
4883 {
4884 struct btrfs_ino_list *ino_elem;
4885 LIST_HEAD(inode_list);
4886 int ret = 0;
4887
4888 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4889 if (!ino_elem)
4890 return -ENOMEM;
4891 ino_elem->ino = ino;
4892 ino_elem->parent = parent;
4893 list_add_tail(&ino_elem->list, &inode_list);
4894
4895 while (!list_empty(&inode_list)) {
4896 struct btrfs_fs_info *fs_info = root->fs_info;
4897 struct btrfs_key key;
4898 struct inode *inode;
4899
4900 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4901 list);
4902 ino = ino_elem->ino;
4903 parent = ino_elem->parent;
4904 list_del(&ino_elem->list);
4905 kfree(ino_elem);
4906 if (ret)
4907 continue;
4908
4909 btrfs_release_path(path);
4910
4911 key.objectid = ino;
4912 key.type = BTRFS_INODE_ITEM_KEY;
4913 key.offset = 0;
4914 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4915 /*
4916 * If the other inode that had a conflicting dir entry was
4917 * deleted in the current transaction, we need to log its parent
4918 * directory.
4919 */
4920 if (IS_ERR(inode)) {
4921 ret = PTR_ERR(inode);
4922 if (ret == -ENOENT) {
4923 key.objectid = parent;
4924 inode = btrfs_iget(fs_info->sb, &key, root,
4925 NULL);
4926 if (IS_ERR(inode)) {
4927 ret = PTR_ERR(inode);
4928 } else {
4929 ret = btrfs_log_inode(trans, root,
4930 BTRFS_I(inode),
4931 LOG_OTHER_INODE_ALL,
4932 0, LLONG_MAX, ctx);
4933 iput(inode);
4934 }
4935 }
4936 continue;
4937 }
4938 /*
4939 * We are safe logging the other inode without acquiring its
4940 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4941 * are safe against concurrent renames of the other inode as
4942 * well because during a rename we pin the log and update the
4943 * log with the new name before we unpin it.
4944 */
4945 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4946 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
4947 if (ret) {
4948 iput(inode);
4949 continue;
4950 }
4951
4952 key.objectid = ino;
4953 key.type = BTRFS_INODE_REF_KEY;
4954 key.offset = 0;
4955 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4956 if (ret < 0) {
4957 iput(inode);
4958 continue;
4959 }
4960
4961 while (true) {
4962 struct extent_buffer *leaf = path->nodes[0];
4963 int slot = path->slots[0];
4964 u64 other_ino = 0;
4965 u64 other_parent = 0;
4966
4967 if (slot >= btrfs_header_nritems(leaf)) {
4968 ret = btrfs_next_leaf(root, path);
4969 if (ret < 0) {
4970 break;
4971 } else if (ret > 0) {
4972 ret = 0;
4973 break;
4974 }
4975 continue;
4976 }
4977
4978 btrfs_item_key_to_cpu(leaf, &key, slot);
4979 if (key.objectid != ino ||
4980 (key.type != BTRFS_INODE_REF_KEY &&
4981 key.type != BTRFS_INODE_EXTREF_KEY)) {
4982 ret = 0;
4983 break;
4984 }
4985
4986 ret = btrfs_check_ref_name_override(leaf, slot, &key,
4987 BTRFS_I(inode), &other_ino,
4988 &other_parent);
4989 if (ret < 0)
4990 break;
4991 if (ret > 0) {
4992 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4993 if (!ino_elem) {
4994 ret = -ENOMEM;
4995 break;
4996 }
4997 ino_elem->ino = other_ino;
4998 ino_elem->parent = other_parent;
4999 list_add_tail(&ino_elem->list, &inode_list);
5000 ret = 0;
5001 }
5002 path->slots[0]++;
5003 }
5004 iput(inode);
5005 }
5006
5007 return ret;
5008 }
5009
5010 /* log a single inode in the tree log.
5011 * At least one parent directory for this inode must exist in the tree
5012 * or be logged already.
5013 *
5014 * Any items from this inode changed by the current transaction are copied
5015 * to the log tree. An extra reference is taken on any extents in this
5016 * file, allowing us to avoid a whole pile of corner cases around logging
5017 * blocks that have been removed from the tree.
5018 *
5019 * See LOG_INODE_ALL and related defines for a description of what inode_only
5020 * does.
5021 *
5022 * This handles both files and directories.
5023 */
5024 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5025 struct btrfs_root *root, struct btrfs_inode *inode,
5026 int inode_only,
5027 const loff_t start,
5028 const loff_t end,
5029 struct btrfs_log_ctx *ctx)
5030 {
5031 struct btrfs_fs_info *fs_info = root->fs_info;
5032 struct btrfs_path *path;
5033 struct btrfs_path *dst_path;
5034 struct btrfs_key min_key;
5035 struct btrfs_key max_key;
5036 struct btrfs_root *log = root->log_root;
5037 u64 last_extent = 0;
5038 int err = 0;
5039 int ret;
5040 int nritems;
5041 int ins_start_slot = 0;
5042 int ins_nr;
5043 bool fast_search = false;
5044 u64 ino = btrfs_ino(inode);
5045 struct extent_map_tree *em_tree = &inode->extent_tree;
5046 u64 logged_isize = 0;
5047 bool need_log_inode_item = true;
5048 bool xattrs_logged = false;
5049 bool recursive_logging = false;
5050
5051 path = btrfs_alloc_path();
5052 if (!path)
5053 return -ENOMEM;
5054 dst_path = btrfs_alloc_path();
5055 if (!dst_path) {
5056 btrfs_free_path(path);
5057 return -ENOMEM;
5058 }
5059
5060 min_key.objectid = ino;
5061 min_key.type = BTRFS_INODE_ITEM_KEY;
5062 min_key.offset = 0;
5063
5064 max_key.objectid = ino;
5065
5066
5067 /* today the code can only do partial logging of directories */
5068 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5069 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5070 &inode->runtime_flags) &&
5071 inode_only >= LOG_INODE_EXISTS))
5072 max_key.type = BTRFS_XATTR_ITEM_KEY;
5073 else
5074 max_key.type = (u8)-1;
5075 max_key.offset = (u64)-1;
5076
5077 /*
5078 * Only run delayed items if we are a dir or a new file.
5079 * Otherwise commit the delayed inode only, which is needed in
5080 * order for the log replay code to mark inodes for link count
5081 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5082 */
5083 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5084 inode->generation > fs_info->last_trans_committed)
5085 ret = btrfs_commit_inode_delayed_items(trans, inode);
5086 else
5087 ret = btrfs_commit_inode_delayed_inode(inode);
5088
5089 if (ret) {
5090 btrfs_free_path(path);
5091 btrfs_free_path(dst_path);
5092 return ret;
5093 }
5094
5095 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5096 recursive_logging = true;
5097 if (inode_only == LOG_OTHER_INODE)
5098 inode_only = LOG_INODE_EXISTS;
5099 else
5100 inode_only = LOG_INODE_ALL;
5101 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5102 } else {
5103 mutex_lock(&inode->log_mutex);
5104 }
5105
5106 /*
5107 * a brute force approach to making sure we get the most uptodate
5108 * copies of everything.
5109 */
5110 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5111 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5112
5113 if (inode_only == LOG_INODE_EXISTS)
5114 max_key_type = BTRFS_XATTR_ITEM_KEY;
5115 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5116 } else {
5117 if (inode_only == LOG_INODE_EXISTS) {
5118 /*
5119 * Make sure the new inode item we write to the log has
5120 * the same isize as the current one (if it exists).
5121 * This is necessary to prevent data loss after log
5122 * replay, and also to prevent doing a wrong expanding
5123 * truncate - for e.g. create file, write 4K into offset
5124 * 0, fsync, write 4K into offset 4096, add hard link,
5125 * fsync some other file (to sync log), power fail - if
5126 * we use the inode's current i_size, after log replay
5127 * we get a 8Kb file, with the last 4Kb extent as a hole
5128 * (zeroes), as if an expanding truncate happened,
5129 * instead of getting a file of 4Kb only.
5130 */
5131 err = logged_inode_size(log, inode, path, &logged_isize);
5132 if (err)
5133 goto out_unlock;
5134 }
5135 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5136 &inode->runtime_flags)) {
5137 if (inode_only == LOG_INODE_EXISTS) {
5138 max_key.type = BTRFS_XATTR_ITEM_KEY;
5139 ret = drop_objectid_items(trans, log, path, ino,
5140 max_key.type);
5141 } else {
5142 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5143 &inode->runtime_flags);
5144 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5145 &inode->runtime_flags);
5146 while(1) {
5147 ret = btrfs_truncate_inode_items(trans,
5148 log, &inode->vfs_inode, 0, 0);
5149 if (ret != -EAGAIN)
5150 break;
5151 }
5152 }
5153 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5154 &inode->runtime_flags) ||
5155 inode_only == LOG_INODE_EXISTS) {
5156 if (inode_only == LOG_INODE_ALL)
5157 fast_search = true;
5158 max_key.type = BTRFS_XATTR_ITEM_KEY;
5159 ret = drop_objectid_items(trans, log, path, ino,
5160 max_key.type);
5161 } else {
5162 if (inode_only == LOG_INODE_ALL)
5163 fast_search = true;
5164 goto log_extents;
5165 }
5166
5167 }
5168 if (ret) {
5169 err = ret;
5170 goto out_unlock;
5171 }
5172
5173 while (1) {
5174 ins_nr = 0;
5175 ret = btrfs_search_forward(root, &min_key,
5176 path, trans->transid);
5177 if (ret < 0) {
5178 err = ret;
5179 goto out_unlock;
5180 }
5181 if (ret != 0)
5182 break;
5183 again:
5184 /* note, ins_nr might be > 0 here, cleanup outside the loop */
5185 if (min_key.objectid != ino)
5186 break;
5187 if (min_key.type > max_key.type)
5188 break;
5189
5190 if (min_key.type == BTRFS_INODE_ITEM_KEY)
5191 need_log_inode_item = false;
5192
5193 if ((min_key.type == BTRFS_INODE_REF_KEY ||
5194 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5195 inode->generation == trans->transid &&
5196 !recursive_logging) {
5197 u64 other_ino = 0;
5198 u64 other_parent = 0;
5199
5200 ret = btrfs_check_ref_name_override(path->nodes[0],
5201 path->slots[0], &min_key, inode,
5202 &other_ino, &other_parent);
5203 if (ret < 0) {
5204 err = ret;
5205 goto out_unlock;
5206 } else if (ret > 0 && ctx &&
5207 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5208 if (ins_nr > 0) {
5209 ins_nr++;
5210 } else {
5211 ins_nr = 1;
5212 ins_start_slot = path->slots[0];
5213 }
5214 ret = copy_items(trans, inode, dst_path, path,
5215 &last_extent, ins_start_slot,
5216 ins_nr, inode_only,
5217 logged_isize);
5218 if (ret < 0) {
5219 err = ret;
5220 goto out_unlock;
5221 }
5222 ins_nr = 0;
5223
5224 err = log_conflicting_inodes(trans, root, path,
5225 ctx, other_ino, other_parent);
5226 if (err)
5227 goto out_unlock;
5228 btrfs_release_path(path);
5229 goto next_key;
5230 }
5231 }
5232
5233 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5234 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5235 if (ins_nr == 0)
5236 goto next_slot;
5237 ret = copy_items(trans, inode, dst_path, path,
5238 &last_extent, ins_start_slot,
5239 ins_nr, inode_only, logged_isize);
5240 if (ret < 0) {
5241 err = ret;
5242 goto out_unlock;
5243 }
5244 ins_nr = 0;
5245 if (ret) {
5246 btrfs_release_path(path);
5247 continue;
5248 }
5249 goto next_slot;
5250 }
5251
5252 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5253 ins_nr++;
5254 goto next_slot;
5255 } else if (!ins_nr) {
5256 ins_start_slot = path->slots[0];
5257 ins_nr = 1;
5258 goto next_slot;
5259 }
5260
5261 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5262 ins_start_slot, ins_nr, inode_only,
5263 logged_isize);
5264 if (ret < 0) {
5265 err = ret;
5266 goto out_unlock;
5267 }
5268 if (ret) {
5269 ins_nr = 0;
5270 btrfs_release_path(path);
5271 continue;
5272 }
5273 ins_nr = 1;
5274 ins_start_slot = path->slots[0];
5275 next_slot:
5276
5277 nritems = btrfs_header_nritems(path->nodes[0]);
5278 path->slots[0]++;
5279 if (path->slots[0] < nritems) {
5280 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5281 path->slots[0]);
5282 goto again;
5283 }
5284 if (ins_nr) {
5285 ret = copy_items(trans, inode, dst_path, path,
5286 &last_extent, ins_start_slot,
5287 ins_nr, inode_only, logged_isize);
5288 if (ret < 0) {
5289 err = ret;
5290 goto out_unlock;
5291 }
5292 ret = 0;
5293 ins_nr = 0;
5294 }
5295 btrfs_release_path(path);
5296 next_key:
5297 if (min_key.offset < (u64)-1) {
5298 min_key.offset++;
5299 } else if (min_key.type < max_key.type) {
5300 min_key.type++;
5301 min_key.offset = 0;
5302 } else {
5303 break;
5304 }
5305 }
5306 if (ins_nr) {
5307 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5308 ins_start_slot, ins_nr, inode_only,
5309 logged_isize);
5310 if (ret < 0) {
5311 err = ret;
5312 goto out_unlock;
5313 }
5314 ret = 0;
5315 ins_nr = 0;
5316 }
5317
5318 btrfs_release_path(path);
5319 btrfs_release_path(dst_path);
5320 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5321 if (err)
5322 goto out_unlock;
5323 xattrs_logged = true;
5324 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5325 btrfs_release_path(path);
5326 btrfs_release_path(dst_path);
5327 err = btrfs_log_trailing_hole(trans, root, inode, path);
5328 if (err)
5329 goto out_unlock;
5330 }
5331 log_extents:
5332 btrfs_release_path(path);
5333 btrfs_release_path(dst_path);
5334 if (need_log_inode_item) {
5335 err = log_inode_item(trans, log, dst_path, inode);
5336 if (!err && !xattrs_logged) {
5337 err = btrfs_log_all_xattrs(trans, root, inode, path,
5338 dst_path);
5339 btrfs_release_path(path);
5340 }
5341 if (err)
5342 goto out_unlock;
5343 }
5344 if (fast_search) {
5345 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5346 ctx, start, end);
5347 if (ret) {
5348 err = ret;
5349 goto out_unlock;
5350 }
5351 } else if (inode_only == LOG_INODE_ALL) {
5352 struct extent_map *em, *n;
5353
5354 write_lock(&em_tree->lock);
5355 /*
5356 * We can't just remove every em if we're called for a ranged
5357 * fsync - that is, one that doesn't cover the whole possible
5358 * file range (0 to LLONG_MAX). This is because we can have
5359 * em's that fall outside the range we're logging and therefore
5360 * their ordered operations haven't completed yet
5361 * (btrfs_finish_ordered_io() not invoked yet). This means we
5362 * didn't get their respective file extent item in the fs/subvol
5363 * tree yet, and need to let the next fast fsync (one which
5364 * consults the list of modified extent maps) find the em so
5365 * that it logs a matching file extent item and waits for the
5366 * respective ordered operation to complete (if it's still
5367 * running).
5368 *
5369 * Removing every em outside the range we're logging would make
5370 * the next fast fsync not log their matching file extent items,
5371 * therefore making us lose data after a log replay.
5372 */
5373 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5374 list) {
5375 const u64 mod_end = em->mod_start + em->mod_len - 1;
5376
5377 if (em->mod_start >= start && mod_end <= end)
5378 list_del_init(&em->list);
5379 }
5380 write_unlock(&em_tree->lock);
5381 }
5382
5383 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5384 ret = log_directory_changes(trans, root, inode, path, dst_path,
5385 ctx);
5386 if (ret) {
5387 err = ret;
5388 goto out_unlock;
5389 }
5390 }
5391
5392 spin_lock(&inode->lock);
5393 inode->logged_trans = trans->transid;
5394 inode->last_log_commit = inode->last_sub_trans;
5395 spin_unlock(&inode->lock);
5396 out_unlock:
5397 mutex_unlock(&inode->log_mutex);
5398
5399 btrfs_free_path(path);
5400 btrfs_free_path(dst_path);
5401 return err;
5402 }
5403
5404 /*
5405 * Check if we must fallback to a transaction commit when logging an inode.
5406 * This must be called after logging the inode and is used only in the context
5407 * when fsyncing an inode requires the need to log some other inode - in which
5408 * case we can't lock the i_mutex of each other inode we need to log as that
5409 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5410 * log inodes up or down in the hierarchy) or rename operations for example. So
5411 * we take the log_mutex of the inode after we have logged it and then check for
5412 * its last_unlink_trans value - this is safe because any task setting
5413 * last_unlink_trans must take the log_mutex and it must do this before it does
5414 * the actual unlink operation, so if we do this check before a concurrent task
5415 * sets last_unlink_trans it means we've logged a consistent version/state of
5416 * all the inode items, otherwise we are not sure and must do a transaction
5417 * commit (the concurrent task might have only updated last_unlink_trans before
5418 * we logged the inode or it might have also done the unlink).
5419 */
5420 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5421 struct btrfs_inode *inode)
5422 {
5423 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5424 bool ret = false;
5425
5426 mutex_lock(&inode->log_mutex);
5427 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5428 /*
5429 * Make sure any commits to the log are forced to be full
5430 * commits.
5431 */
5432 btrfs_set_log_full_commit(fs_info, trans);
5433 ret = true;
5434 }
5435 mutex_unlock(&inode->log_mutex);
5436
5437 return ret;
5438 }
5439
5440 /*
5441 * follow the dentry parent pointers up the chain and see if any
5442 * of the directories in it require a full commit before they can
5443 * be logged. Returns zero if nothing special needs to be done or 1 if
5444 * a full commit is required.
5445 */
5446 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5447 struct btrfs_inode *inode,
5448 struct dentry *parent,
5449 struct super_block *sb,
5450 u64 last_committed)
5451 {
5452 int ret = 0;
5453 struct dentry *old_parent = NULL;
5454 struct btrfs_inode *orig_inode = inode;
5455
5456 /*
5457 * for regular files, if its inode is already on disk, we don't
5458 * have to worry about the parents at all. This is because
5459 * we can use the last_unlink_trans field to record renames
5460 * and other fun in this file.
5461 */
5462 if (S_ISREG(inode->vfs_inode.i_mode) &&
5463 inode->generation <= last_committed &&
5464 inode->last_unlink_trans <= last_committed)
5465 goto out;
5466
5467 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5468 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5469 goto out;
5470 inode = BTRFS_I(d_inode(parent));
5471 }
5472
5473 while (1) {
5474 /*
5475 * If we are logging a directory then we start with our inode,
5476 * not our parent's inode, so we need to skip setting the
5477 * logged_trans so that further down in the log code we don't
5478 * think this inode has already been logged.
5479 */
5480 if (inode != orig_inode)
5481 inode->logged_trans = trans->transid;
5482 smp_mb();
5483
5484 if (btrfs_must_commit_transaction(trans, inode)) {
5485 ret = 1;
5486 break;
5487 }
5488
5489 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5490 break;
5491
5492 if (IS_ROOT(parent)) {
5493 inode = BTRFS_I(d_inode(parent));
5494 if (btrfs_must_commit_transaction(trans, inode))
5495 ret = 1;
5496 break;
5497 }
5498
5499 parent = dget_parent(parent);
5500 dput(old_parent);
5501 old_parent = parent;
5502 inode = BTRFS_I(d_inode(parent));
5503
5504 }
5505 dput(old_parent);
5506 out:
5507 return ret;
5508 }
5509
5510 struct btrfs_dir_list {
5511 u64 ino;
5512 struct list_head list;
5513 };
5514
5515 /*
5516 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5517 * details about the why it is needed.
5518 * This is a recursive operation - if an existing dentry corresponds to a
5519 * directory, that directory's new entries are logged too (same behaviour as
5520 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5521 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5522 * complains about the following circular lock dependency / possible deadlock:
5523 *
5524 * CPU0 CPU1
5525 * ---- ----
5526 * lock(&type->i_mutex_dir_key#3/2);
5527 * lock(sb_internal#2);
5528 * lock(&type->i_mutex_dir_key#3/2);
5529 * lock(&sb->s_type->i_mutex_key#14);
5530 *
5531 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5532 * sb_start_intwrite() in btrfs_start_transaction().
5533 * Not locking i_mutex of the inodes is still safe because:
5534 *
5535 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5536 * that while logging the inode new references (names) are added or removed
5537 * from the inode, leaving the logged inode item with a link count that does
5538 * not match the number of logged inode reference items. This is fine because
5539 * at log replay time we compute the real number of links and correct the
5540 * link count in the inode item (see replay_one_buffer() and
5541 * link_to_fixup_dir());
5542 *
5543 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5544 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5545 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5546 * has a size that doesn't match the sum of the lengths of all the logged
5547 * names. This does not result in a problem because if a dir_item key is
5548 * logged but its matching dir_index key is not logged, at log replay time we
5549 * don't use it to replay the respective name (see replay_one_name()). On the
5550 * other hand if only the dir_index key ends up being logged, the respective
5551 * name is added to the fs/subvol tree with both the dir_item and dir_index
5552 * keys created (see replay_one_name()).
5553 * The directory's inode item with a wrong i_size is not a problem as well,
5554 * since we don't use it at log replay time to set the i_size in the inode
5555 * item of the fs/subvol tree (see overwrite_item()).
5556 */
5557 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5558 struct btrfs_root *root,
5559 struct btrfs_inode *start_inode,
5560 struct btrfs_log_ctx *ctx)
5561 {
5562 struct btrfs_fs_info *fs_info = root->fs_info;
5563 struct btrfs_root *log = root->log_root;
5564 struct btrfs_path *path;
5565 LIST_HEAD(dir_list);
5566 struct btrfs_dir_list *dir_elem;
5567 int ret = 0;
5568
5569 path = btrfs_alloc_path();
5570 if (!path)
5571 return -ENOMEM;
5572
5573 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5574 if (!dir_elem) {
5575 btrfs_free_path(path);
5576 return -ENOMEM;
5577 }
5578 dir_elem->ino = btrfs_ino(start_inode);
5579 list_add_tail(&dir_elem->list, &dir_list);
5580
5581 while (!list_empty(&dir_list)) {
5582 struct extent_buffer *leaf;
5583 struct btrfs_key min_key;
5584 int nritems;
5585 int i;
5586
5587 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5588 list);
5589 if (ret)
5590 goto next_dir_inode;
5591
5592 min_key.objectid = dir_elem->ino;
5593 min_key.type = BTRFS_DIR_ITEM_KEY;
5594 min_key.offset = 0;
5595 again:
5596 btrfs_release_path(path);
5597 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5598 if (ret < 0) {
5599 goto next_dir_inode;
5600 } else if (ret > 0) {
5601 ret = 0;
5602 goto next_dir_inode;
5603 }
5604
5605 process_leaf:
5606 leaf = path->nodes[0];
5607 nritems = btrfs_header_nritems(leaf);
5608 for (i = path->slots[0]; i < nritems; i++) {
5609 struct btrfs_dir_item *di;
5610 struct btrfs_key di_key;
5611 struct inode *di_inode;
5612 struct btrfs_dir_list *new_dir_elem;
5613 int log_mode = LOG_INODE_EXISTS;
5614 int type;
5615
5616 btrfs_item_key_to_cpu(leaf, &min_key, i);
5617 if (min_key.objectid != dir_elem->ino ||
5618 min_key.type != BTRFS_DIR_ITEM_KEY)
5619 goto next_dir_inode;
5620
5621 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5622 type = btrfs_dir_type(leaf, di);
5623 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5624 type != BTRFS_FT_DIR)
5625 continue;
5626 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5627 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5628 continue;
5629
5630 btrfs_release_path(path);
5631 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5632 if (IS_ERR(di_inode)) {
5633 ret = PTR_ERR(di_inode);
5634 goto next_dir_inode;
5635 }
5636
5637 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5638 iput(di_inode);
5639 break;
5640 }
5641
5642 ctx->log_new_dentries = false;
5643 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5644 log_mode = LOG_INODE_ALL;
5645 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5646 log_mode, 0, LLONG_MAX, ctx);
5647 if (!ret &&
5648 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5649 ret = 1;
5650 iput(di_inode);
5651 if (ret)
5652 goto next_dir_inode;
5653 if (ctx->log_new_dentries) {
5654 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5655 GFP_NOFS);
5656 if (!new_dir_elem) {
5657 ret = -ENOMEM;
5658 goto next_dir_inode;
5659 }
5660 new_dir_elem->ino = di_key.objectid;
5661 list_add_tail(&new_dir_elem->list, &dir_list);
5662 }
5663 break;
5664 }
5665 if (i == nritems) {
5666 ret = btrfs_next_leaf(log, path);
5667 if (ret < 0) {
5668 goto next_dir_inode;
5669 } else if (ret > 0) {
5670 ret = 0;
5671 goto next_dir_inode;
5672 }
5673 goto process_leaf;
5674 }
5675 if (min_key.offset < (u64)-1) {
5676 min_key.offset++;
5677 goto again;
5678 }
5679 next_dir_inode:
5680 list_del(&dir_elem->list);
5681 kfree(dir_elem);
5682 }
5683
5684 btrfs_free_path(path);
5685 return ret;
5686 }
5687
5688 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5689 struct btrfs_inode *inode,
5690 struct btrfs_log_ctx *ctx)
5691 {
5692 struct btrfs_fs_info *fs_info = trans->fs_info;
5693 int ret;
5694 struct btrfs_path *path;
5695 struct btrfs_key key;
5696 struct btrfs_root *root = inode->root;
5697 const u64 ino = btrfs_ino(inode);
5698
5699 path = btrfs_alloc_path();
5700 if (!path)
5701 return -ENOMEM;
5702 path->skip_locking = 1;
5703 path->search_commit_root = 1;
5704
5705 key.objectid = ino;
5706 key.type = BTRFS_INODE_REF_KEY;
5707 key.offset = 0;
5708 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5709 if (ret < 0)
5710 goto out;
5711
5712 while (true) {
5713 struct extent_buffer *leaf = path->nodes[0];
5714 int slot = path->slots[0];
5715 u32 cur_offset = 0;
5716 u32 item_size;
5717 unsigned long ptr;
5718
5719 if (slot >= btrfs_header_nritems(leaf)) {
5720 ret = btrfs_next_leaf(root, path);
5721 if (ret < 0)
5722 goto out;
5723 else if (ret > 0)
5724 break;
5725 continue;
5726 }
5727
5728 btrfs_item_key_to_cpu(leaf, &key, slot);
5729 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5730 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5731 break;
5732
5733 item_size = btrfs_item_size_nr(leaf, slot);
5734 ptr = btrfs_item_ptr_offset(leaf, slot);
5735 while (cur_offset < item_size) {
5736 struct btrfs_key inode_key;
5737 struct inode *dir_inode;
5738
5739 inode_key.type = BTRFS_INODE_ITEM_KEY;
5740 inode_key.offset = 0;
5741
5742 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5743 struct btrfs_inode_extref *extref;
5744
5745 extref = (struct btrfs_inode_extref *)
5746 (ptr + cur_offset);
5747 inode_key.objectid = btrfs_inode_extref_parent(
5748 leaf, extref);
5749 cur_offset += sizeof(*extref);
5750 cur_offset += btrfs_inode_extref_name_len(leaf,
5751 extref);
5752 } else {
5753 inode_key.objectid = key.offset;
5754 cur_offset = item_size;
5755 }
5756
5757 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5758 root, NULL);
5759 /*
5760 * If the parent inode was deleted, return an error to
5761 * fallback to a transaction commit. This is to prevent
5762 * getting an inode that was moved from one parent A to
5763 * a parent B, got its former parent A deleted and then
5764 * it got fsync'ed, from existing at both parents after
5765 * a log replay (and the old parent still existing).
5766 * Example:
5767 *
5768 * mkdir /mnt/A
5769 * mkdir /mnt/B
5770 * touch /mnt/B/bar
5771 * sync
5772 * mv /mnt/B/bar /mnt/A/bar
5773 * mv -T /mnt/A /mnt/B
5774 * fsync /mnt/B/bar
5775 * <power fail>
5776 *
5777 * If we ignore the old parent B which got deleted,
5778 * after a log replay we would have file bar linked
5779 * at both parents and the old parent B would still
5780 * exist.
5781 */
5782 if (IS_ERR(dir_inode)) {
5783 ret = PTR_ERR(dir_inode);
5784 goto out;
5785 }
5786
5787 if (ctx)
5788 ctx->log_new_dentries = false;
5789 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5790 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5791 if (!ret &&
5792 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5793 ret = 1;
5794 if (!ret && ctx && ctx->log_new_dentries)
5795 ret = log_new_dir_dentries(trans, root,
5796 BTRFS_I(dir_inode), ctx);
5797 iput(dir_inode);
5798 if (ret)
5799 goto out;
5800 }
5801 path->slots[0]++;
5802 }
5803 ret = 0;
5804 out:
5805 btrfs_free_path(path);
5806 return ret;
5807 }
5808
5809 /*
5810 * helper function around btrfs_log_inode to make sure newly created
5811 * parent directories also end up in the log. A minimal inode and backref
5812 * only logging is done of any parent directories that are older than
5813 * the last committed transaction
5814 */
5815 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5816 struct btrfs_inode *inode,
5817 struct dentry *parent,
5818 const loff_t start,
5819 const loff_t end,
5820 int inode_only,
5821 struct btrfs_log_ctx *ctx)
5822 {
5823 struct btrfs_root *root = inode->root;
5824 struct btrfs_fs_info *fs_info = root->fs_info;
5825 struct super_block *sb;
5826 struct dentry *old_parent = NULL;
5827 int ret = 0;
5828 u64 last_committed = fs_info->last_trans_committed;
5829 bool log_dentries = false;
5830 struct btrfs_inode *orig_inode = inode;
5831
5832 sb = inode->vfs_inode.i_sb;
5833
5834 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5835 ret = 1;
5836 goto end_no_trans;
5837 }
5838
5839 /*
5840 * The prev transaction commit doesn't complete, we need do
5841 * full commit by ourselves.
5842 */
5843 if (fs_info->last_trans_log_full_commit >
5844 fs_info->last_trans_committed) {
5845 ret = 1;
5846 goto end_no_trans;
5847 }
5848
5849 if (btrfs_root_refs(&root->root_item) == 0) {
5850 ret = 1;
5851 goto end_no_trans;
5852 }
5853
5854 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5855 last_committed);
5856 if (ret)
5857 goto end_no_trans;
5858
5859 /*
5860 * Skip already logged inodes or inodes corresponding to tmpfiles
5861 * (since logging them is pointless, a link count of 0 means they
5862 * will never be accessible).
5863 */
5864 if (btrfs_inode_in_log(inode, trans->transid) ||
5865 inode->vfs_inode.i_nlink == 0) {
5866 ret = BTRFS_NO_LOG_SYNC;
5867 goto end_no_trans;
5868 }
5869
5870 ret = start_log_trans(trans, root, ctx);
5871 if (ret)
5872 goto end_no_trans;
5873
5874 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5875 if (ret)
5876 goto end_trans;
5877
5878 /*
5879 * for regular files, if its inode is already on disk, we don't
5880 * have to worry about the parents at all. This is because
5881 * we can use the last_unlink_trans field to record renames
5882 * and other fun in this file.
5883 */
5884 if (S_ISREG(inode->vfs_inode.i_mode) &&
5885 inode->generation <= last_committed &&
5886 inode->last_unlink_trans <= last_committed) {
5887 ret = 0;
5888 goto end_trans;
5889 }
5890
5891 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5892 log_dentries = true;
5893
5894 /*
5895 * On unlink we must make sure all our current and old parent directory
5896 * inodes are fully logged. This is to prevent leaving dangling
5897 * directory index entries in directories that were our parents but are
5898 * not anymore. Not doing this results in old parent directory being
5899 * impossible to delete after log replay (rmdir will always fail with
5900 * error -ENOTEMPTY).
5901 *
5902 * Example 1:
5903 *
5904 * mkdir testdir
5905 * touch testdir/foo
5906 * ln testdir/foo testdir/bar
5907 * sync
5908 * unlink testdir/bar
5909 * xfs_io -c fsync testdir/foo
5910 * <power failure>
5911 * mount fs, triggers log replay
5912 *
5913 * If we don't log the parent directory (testdir), after log replay the
5914 * directory still has an entry pointing to the file inode using the bar
5915 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5916 * the file inode has a link count of 1.
5917 *
5918 * Example 2:
5919 *
5920 * mkdir testdir
5921 * touch foo
5922 * ln foo testdir/foo2
5923 * ln foo testdir/foo3
5924 * sync
5925 * unlink testdir/foo3
5926 * xfs_io -c fsync foo
5927 * <power failure>
5928 * mount fs, triggers log replay
5929 *
5930 * Similar as the first example, after log replay the parent directory
5931 * testdir still has an entry pointing to the inode file with name foo3
5932 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5933 * and has a link count of 2.
5934 */
5935 if (inode->last_unlink_trans > last_committed) {
5936 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5937 if (ret)
5938 goto end_trans;
5939 }
5940
5941 /*
5942 * If a new hard link was added to the inode in the current transaction
5943 * and its link count is now greater than 1, we need to fallback to a
5944 * transaction commit, otherwise we can end up not logging all its new
5945 * parents for all the hard links. Here just from the dentry used to
5946 * fsync, we can not visit the ancestor inodes for all the other hard
5947 * links to figure out if any is new, so we fallback to a transaction
5948 * commit (instead of adding a lot of complexity of scanning a btree,
5949 * since this scenario is not a common use case).
5950 */
5951 if (inode->vfs_inode.i_nlink > 1 &&
5952 inode->last_link_trans > last_committed) {
5953 ret = -EMLINK;
5954 goto end_trans;
5955 }
5956
5957 while (1) {
5958 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5959 break;
5960
5961 inode = BTRFS_I(d_inode(parent));
5962 if (root != inode->root)
5963 break;
5964
5965 if (inode->generation > last_committed) {
5966 ret = btrfs_log_inode(trans, root, inode,
5967 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5968 if (ret)
5969 goto end_trans;
5970 }
5971 if (IS_ROOT(parent))
5972 break;
5973
5974 parent = dget_parent(parent);
5975 dput(old_parent);
5976 old_parent = parent;
5977 }
5978 if (log_dentries)
5979 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5980 else
5981 ret = 0;
5982 end_trans:
5983 dput(old_parent);
5984 if (ret < 0) {
5985 btrfs_set_log_full_commit(fs_info, trans);
5986 ret = 1;
5987 }
5988
5989 if (ret)
5990 btrfs_remove_log_ctx(root, ctx);
5991 btrfs_end_log_trans(root);
5992 end_no_trans:
5993 return ret;
5994 }
5995
5996 /*
5997 * it is not safe to log dentry if the chunk root has added new
5998 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5999 * If this returns 1, you must commit the transaction to safely get your
6000 * data on disk.
6001 */
6002 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6003 struct dentry *dentry,
6004 const loff_t start,
6005 const loff_t end,
6006 struct btrfs_log_ctx *ctx)
6007 {
6008 struct dentry *parent = dget_parent(dentry);
6009 int ret;
6010
6011 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6012 start, end, LOG_INODE_ALL, ctx);
6013 dput(parent);
6014
6015 return ret;
6016 }
6017
6018 /*
6019 * should be called during mount to recover any replay any log trees
6020 * from the FS
6021 */
6022 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6023 {
6024 int ret;
6025 struct btrfs_path *path;
6026 struct btrfs_trans_handle *trans;
6027 struct btrfs_key key;
6028 struct btrfs_key found_key;
6029 struct btrfs_key tmp_key;
6030 struct btrfs_root *log;
6031 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6032 struct walk_control wc = {
6033 .process_func = process_one_buffer,
6034 .stage = 0,
6035 };
6036
6037 path = btrfs_alloc_path();
6038 if (!path)
6039 return -ENOMEM;
6040
6041 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6042
6043 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6044 if (IS_ERR(trans)) {
6045 ret = PTR_ERR(trans);
6046 goto error;
6047 }
6048
6049 wc.trans = trans;
6050 wc.pin = 1;
6051
6052 ret = walk_log_tree(trans, log_root_tree, &wc);
6053 if (ret) {
6054 btrfs_handle_fs_error(fs_info, ret,
6055 "Failed to pin buffers while recovering log root tree.");
6056 goto error;
6057 }
6058
6059 again:
6060 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6061 key.offset = (u64)-1;
6062 key.type = BTRFS_ROOT_ITEM_KEY;
6063
6064 while (1) {
6065 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6066
6067 if (ret < 0) {
6068 btrfs_handle_fs_error(fs_info, ret,
6069 "Couldn't find tree log root.");
6070 goto error;
6071 }
6072 if (ret > 0) {
6073 if (path->slots[0] == 0)
6074 break;
6075 path->slots[0]--;
6076 }
6077 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6078 path->slots[0]);
6079 btrfs_release_path(path);
6080 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6081 break;
6082
6083 log = btrfs_read_fs_root(log_root_tree, &found_key);
6084 if (IS_ERR(log)) {
6085 ret = PTR_ERR(log);
6086 btrfs_handle_fs_error(fs_info, ret,
6087 "Couldn't read tree log root.");
6088 goto error;
6089 }
6090
6091 tmp_key.objectid = found_key.offset;
6092 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6093 tmp_key.offset = (u64)-1;
6094
6095 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6096 if (IS_ERR(wc.replay_dest)) {
6097 ret = PTR_ERR(wc.replay_dest);
6098 free_extent_buffer(log->node);
6099 free_extent_buffer(log->commit_root);
6100 kfree(log);
6101 btrfs_handle_fs_error(fs_info, ret,
6102 "Couldn't read target root for tree log recovery.");
6103 goto error;
6104 }
6105
6106 wc.replay_dest->log_root = log;
6107 btrfs_record_root_in_trans(trans, wc.replay_dest);
6108 ret = walk_log_tree(trans, log, &wc);
6109
6110 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6111 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6112 path);
6113 }
6114
6115 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6116 struct btrfs_root *root = wc.replay_dest;
6117
6118 btrfs_release_path(path);
6119
6120 /*
6121 * We have just replayed everything, and the highest
6122 * objectid of fs roots probably has changed in case
6123 * some inode_item's got replayed.
6124 *
6125 * root->objectid_mutex is not acquired as log replay
6126 * could only happen during mount.
6127 */
6128 ret = btrfs_find_highest_objectid(root,
6129 &root->highest_objectid);
6130 }
6131
6132 key.offset = found_key.offset - 1;
6133 wc.replay_dest->log_root = NULL;
6134 free_extent_buffer(log->node);
6135 free_extent_buffer(log->commit_root);
6136 kfree(log);
6137
6138 if (ret)
6139 goto error;
6140
6141 if (found_key.offset == 0)
6142 break;
6143 }
6144 btrfs_release_path(path);
6145
6146 /* step one is to pin it all, step two is to replay just inodes */
6147 if (wc.pin) {
6148 wc.pin = 0;
6149 wc.process_func = replay_one_buffer;
6150 wc.stage = LOG_WALK_REPLAY_INODES;
6151 goto again;
6152 }
6153 /* step three is to replay everything */
6154 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6155 wc.stage++;
6156 goto again;
6157 }
6158
6159 btrfs_free_path(path);
6160
6161 /* step 4: commit the transaction, which also unpins the blocks */
6162 ret = btrfs_commit_transaction(trans);
6163 if (ret)
6164 return ret;
6165
6166 free_extent_buffer(log_root_tree->node);
6167 log_root_tree->log_root = NULL;
6168 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6169 kfree(log_root_tree);
6170
6171 return 0;
6172 error:
6173 if (wc.trans)
6174 btrfs_end_transaction(wc.trans);
6175 btrfs_free_path(path);
6176 return ret;
6177 }
6178
6179 /*
6180 * there are some corner cases where we want to force a full
6181 * commit instead of allowing a directory to be logged.
6182 *
6183 * They revolve around files there were unlinked from the directory, and
6184 * this function updates the parent directory so that a full commit is
6185 * properly done if it is fsync'd later after the unlinks are done.
6186 *
6187 * Must be called before the unlink operations (updates to the subvolume tree,
6188 * inodes, etc) are done.
6189 */
6190 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6191 struct btrfs_inode *dir, struct btrfs_inode *inode,
6192 int for_rename)
6193 {
6194 /*
6195 * when we're logging a file, if it hasn't been renamed
6196 * or unlinked, and its inode is fully committed on disk,
6197 * we don't have to worry about walking up the directory chain
6198 * to log its parents.
6199 *
6200 * So, we use the last_unlink_trans field to put this transid
6201 * into the file. When the file is logged we check it and
6202 * don't log the parents if the file is fully on disk.
6203 */
6204 mutex_lock(&inode->log_mutex);
6205 inode->last_unlink_trans = trans->transid;
6206 mutex_unlock(&inode->log_mutex);
6207
6208 /*
6209 * if this directory was already logged any new
6210 * names for this file/dir will get recorded
6211 */
6212 smp_mb();
6213 if (dir->logged_trans == trans->transid)
6214 return;
6215
6216 /*
6217 * if the inode we're about to unlink was logged,
6218 * the log will be properly updated for any new names
6219 */
6220 if (inode->logged_trans == trans->transid)
6221 return;
6222
6223 /*
6224 * when renaming files across directories, if the directory
6225 * there we're unlinking from gets fsync'd later on, there's
6226 * no way to find the destination directory later and fsync it
6227 * properly. So, we have to be conservative and force commits
6228 * so the new name gets discovered.
6229 */
6230 if (for_rename)
6231 goto record;
6232
6233 /* we can safely do the unlink without any special recording */
6234 return;
6235
6236 record:
6237 mutex_lock(&dir->log_mutex);
6238 dir->last_unlink_trans = trans->transid;
6239 mutex_unlock(&dir->log_mutex);
6240 }
6241
6242 /*
6243 * Make sure that if someone attempts to fsync the parent directory of a deleted
6244 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6245 * that after replaying the log tree of the parent directory's root we will not
6246 * see the snapshot anymore and at log replay time we will not see any log tree
6247 * corresponding to the deleted snapshot's root, which could lead to replaying
6248 * it after replaying the log tree of the parent directory (which would replay
6249 * the snapshot delete operation).
6250 *
6251 * Must be called before the actual snapshot destroy operation (updates to the
6252 * parent root and tree of tree roots trees, etc) are done.
6253 */
6254 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6255 struct btrfs_inode *dir)
6256 {
6257 mutex_lock(&dir->log_mutex);
6258 dir->last_unlink_trans = trans->transid;
6259 mutex_unlock(&dir->log_mutex);
6260 }
6261
6262 /*
6263 * Call this after adding a new name for a file and it will properly
6264 * update the log to reflect the new name.
6265 *
6266 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
6267 * true (because it's not used).
6268 *
6269 * Return value depends on whether @sync_log is true or false.
6270 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6271 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6272 * otherwise.
6273 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6274 * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6275 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6276 * committed (without attempting to sync the log).
6277 */
6278 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6279 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6280 struct dentry *parent,
6281 bool sync_log, struct btrfs_log_ctx *ctx)
6282 {
6283 struct btrfs_fs_info *fs_info = trans->fs_info;
6284 int ret;
6285
6286 /*
6287 * this will force the logging code to walk the dentry chain
6288 * up for the file
6289 */
6290 if (!S_ISDIR(inode->vfs_inode.i_mode))
6291 inode->last_unlink_trans = trans->transid;
6292
6293 /*
6294 * if this inode hasn't been logged and directory we're renaming it
6295 * from hasn't been logged, we don't need to log it
6296 */
6297 if (inode->logged_trans <= fs_info->last_trans_committed &&
6298 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6299 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6300 BTRFS_DONT_NEED_LOG_SYNC;
6301
6302 if (sync_log) {
6303 struct btrfs_log_ctx ctx2;
6304
6305 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6306 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6307 LOG_INODE_EXISTS, &ctx2);
6308 if (ret == BTRFS_NO_LOG_SYNC)
6309 return BTRFS_DONT_NEED_TRANS_COMMIT;
6310 else if (ret)
6311 return BTRFS_NEED_TRANS_COMMIT;
6312
6313 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6314 if (ret)
6315 return BTRFS_NEED_TRANS_COMMIT;
6316 return BTRFS_DONT_NEED_TRANS_COMMIT;
6317 }
6318
6319 ASSERT(ctx);
6320 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6321 LOG_INODE_EXISTS, ctx);
6322 if (ret == BTRFS_NO_LOG_SYNC)
6323 return BTRFS_DONT_NEED_LOG_SYNC;
6324 else if (ret)
6325 return BTRFS_NEED_TRANS_COMMIT;
6326
6327 return BTRFS_NEED_LOG_SYNC;
6328 }
6329