/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/pagevec.h>
30 static void __nilfs_btree_init(struct nilfs_bmap
*bmap
);
32 static struct nilfs_btree_path
*nilfs_btree_alloc_path(void)
34 struct nilfs_btree_path
*path
;
35 int level
= NILFS_BTREE_LEVEL_DATA
;
37 path
= kmem_cache_alloc(nilfs_btree_path_cache
, GFP_NOFS
);
41 for (; level
< NILFS_BTREE_LEVEL_MAX
; level
++) {
42 path
[level
].bp_bh
= NULL
;
43 path
[level
].bp_sib_bh
= NULL
;
44 path
[level
].bp_index
= 0;
45 path
[level
].bp_oldreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
46 path
[level
].bp_newreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
47 path
[level
].bp_op
= NULL
;
54 static void nilfs_btree_free_path(struct nilfs_btree_path
*path
)
56 int level
= NILFS_BTREE_LEVEL_DATA
;
58 for (; level
< NILFS_BTREE_LEVEL_MAX
; level
++)
59 brelse(path
[level
].bp_bh
);
61 kmem_cache_free(nilfs_btree_path_cache
, path
);
65 * B-tree node operations
67 static int nilfs_btree_get_new_block(const struct nilfs_bmap
*btree
,
68 __u64 ptr
, struct buffer_head
**bhp
)
70 struct address_space
*btnc
= &NILFS_BMAP_I(btree
)->i_btnode_cache
;
71 struct buffer_head
*bh
;
73 bh
= nilfs_btnode_create_block(btnc
, ptr
);
77 set_buffer_nilfs_volatile(bh
);
82 static int nilfs_btree_node_get_flags(const struct nilfs_btree_node
*node
)
84 return node
->bn_flags
;
88 nilfs_btree_node_set_flags(struct nilfs_btree_node
*node
, int flags
)
90 node
->bn_flags
= flags
;
93 static int nilfs_btree_node_root(const struct nilfs_btree_node
*node
)
95 return nilfs_btree_node_get_flags(node
) & NILFS_BTREE_NODE_ROOT
;
98 static int nilfs_btree_node_get_level(const struct nilfs_btree_node
*node
)
100 return node
->bn_level
;
104 nilfs_btree_node_set_level(struct nilfs_btree_node
*node
, int level
)
106 node
->bn_level
= level
;
109 static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node
*node
)
111 return le16_to_cpu(node
->bn_nchildren
);
115 nilfs_btree_node_set_nchildren(struct nilfs_btree_node
*node
, int nchildren
)
117 node
->bn_nchildren
= cpu_to_le16(nchildren
);
120 static int nilfs_btree_node_size(const struct nilfs_bmap
*btree
)
122 return 1 << btree
->b_inode
->i_blkbits
;
125 static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap
*btree
)
127 return btree
->b_nchildren_per_block
;
131 nilfs_btree_node_dkeys(const struct nilfs_btree_node
*node
)
133 return (__le64
*)((char *)(node
+ 1) +
134 (nilfs_btree_node_root(node
) ?
135 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE
));
139 nilfs_btree_node_dptrs(const struct nilfs_btree_node
*node
, int ncmax
)
141 return (__le64
*)(nilfs_btree_node_dkeys(node
) + ncmax
);
145 nilfs_btree_node_get_key(const struct nilfs_btree_node
*node
, int index
)
147 return le64_to_cpu(*(nilfs_btree_node_dkeys(node
) + index
));
151 nilfs_btree_node_set_key(struct nilfs_btree_node
*node
, int index
, __u64 key
)
153 *(nilfs_btree_node_dkeys(node
) + index
) = cpu_to_le64(key
);
157 nilfs_btree_node_get_ptr(const struct nilfs_btree_node
*node
, int index
,
160 return le64_to_cpu(*(nilfs_btree_node_dptrs(node
, ncmax
) + index
));
164 nilfs_btree_node_set_ptr(struct nilfs_btree_node
*node
, int index
, __u64 ptr
,
167 *(nilfs_btree_node_dptrs(node
, ncmax
) + index
) = cpu_to_le64(ptr
);
170 static void nilfs_btree_node_init(struct nilfs_btree_node
*node
, int flags
,
171 int level
, int nchildren
, int ncmax
,
172 const __u64
*keys
, const __u64
*ptrs
)
178 nilfs_btree_node_set_flags(node
, flags
);
179 nilfs_btree_node_set_level(node
, level
);
180 nilfs_btree_node_set_nchildren(node
, nchildren
);
182 dkeys
= nilfs_btree_node_dkeys(node
);
183 dptrs
= nilfs_btree_node_dptrs(node
, ncmax
);
184 for (i
= 0; i
< nchildren
; i
++) {
185 dkeys
[i
] = cpu_to_le64(keys
[i
]);
186 dptrs
[i
] = cpu_to_le64(ptrs
[i
]);
190 /* Assume the buffer heads corresponding to left and right are locked. */
191 static void nilfs_btree_node_move_left(struct nilfs_btree_node
*left
,
192 struct nilfs_btree_node
*right
,
193 int n
, int lncmax
, int rncmax
)
195 __le64
*ldkeys
, *rdkeys
;
196 __le64
*ldptrs
, *rdptrs
;
197 int lnchildren
, rnchildren
;
199 ldkeys
= nilfs_btree_node_dkeys(left
);
200 ldptrs
= nilfs_btree_node_dptrs(left
, lncmax
);
201 lnchildren
= nilfs_btree_node_get_nchildren(left
);
203 rdkeys
= nilfs_btree_node_dkeys(right
);
204 rdptrs
= nilfs_btree_node_dptrs(right
, rncmax
);
205 rnchildren
= nilfs_btree_node_get_nchildren(right
);
207 memcpy(ldkeys
+ lnchildren
, rdkeys
, n
* sizeof(*rdkeys
));
208 memcpy(ldptrs
+ lnchildren
, rdptrs
, n
* sizeof(*rdptrs
));
209 memmove(rdkeys
, rdkeys
+ n
, (rnchildren
- n
) * sizeof(*rdkeys
));
210 memmove(rdptrs
, rdptrs
+ n
, (rnchildren
- n
) * sizeof(*rdptrs
));
214 nilfs_btree_node_set_nchildren(left
, lnchildren
);
215 nilfs_btree_node_set_nchildren(right
, rnchildren
);
218 /* Assume that the buffer heads corresponding to left and right are locked. */
219 static void nilfs_btree_node_move_right(struct nilfs_btree_node
*left
,
220 struct nilfs_btree_node
*right
,
221 int n
, int lncmax
, int rncmax
)
223 __le64
*ldkeys
, *rdkeys
;
224 __le64
*ldptrs
, *rdptrs
;
225 int lnchildren
, rnchildren
;
227 ldkeys
= nilfs_btree_node_dkeys(left
);
228 ldptrs
= nilfs_btree_node_dptrs(left
, lncmax
);
229 lnchildren
= nilfs_btree_node_get_nchildren(left
);
231 rdkeys
= nilfs_btree_node_dkeys(right
);
232 rdptrs
= nilfs_btree_node_dptrs(right
, rncmax
);
233 rnchildren
= nilfs_btree_node_get_nchildren(right
);
235 memmove(rdkeys
+ n
, rdkeys
, rnchildren
* sizeof(*rdkeys
));
236 memmove(rdptrs
+ n
, rdptrs
, rnchildren
* sizeof(*rdptrs
));
237 memcpy(rdkeys
, ldkeys
+ lnchildren
- n
, n
* sizeof(*rdkeys
));
238 memcpy(rdptrs
, ldptrs
+ lnchildren
- n
, n
* sizeof(*rdptrs
));
242 nilfs_btree_node_set_nchildren(left
, lnchildren
);
243 nilfs_btree_node_set_nchildren(right
, rnchildren
);
246 /* Assume that the buffer head corresponding to node is locked. */
247 static void nilfs_btree_node_insert(struct nilfs_btree_node
*node
, int index
,
248 __u64 key
, __u64 ptr
, int ncmax
)
254 dkeys
= nilfs_btree_node_dkeys(node
);
255 dptrs
= nilfs_btree_node_dptrs(node
, ncmax
);
256 nchildren
= nilfs_btree_node_get_nchildren(node
);
257 if (index
< nchildren
) {
258 memmove(dkeys
+ index
+ 1, dkeys
+ index
,
259 (nchildren
- index
) * sizeof(*dkeys
));
260 memmove(dptrs
+ index
+ 1, dptrs
+ index
,
261 (nchildren
- index
) * sizeof(*dptrs
));
263 dkeys
[index
] = cpu_to_le64(key
);
264 dptrs
[index
] = cpu_to_le64(ptr
);
266 nilfs_btree_node_set_nchildren(node
, nchildren
);
269 /* Assume that the buffer head corresponding to node is locked. */
270 static void nilfs_btree_node_delete(struct nilfs_btree_node
*node
, int index
,
271 __u64
*keyp
, __u64
*ptrp
, int ncmax
)
279 dkeys
= nilfs_btree_node_dkeys(node
);
280 dptrs
= nilfs_btree_node_dptrs(node
, ncmax
);
281 key
= le64_to_cpu(dkeys
[index
]);
282 ptr
= le64_to_cpu(dptrs
[index
]);
283 nchildren
= nilfs_btree_node_get_nchildren(node
);
289 if (index
< nchildren
- 1) {
290 memmove(dkeys
+ index
, dkeys
+ index
+ 1,
291 (nchildren
- index
- 1) * sizeof(*dkeys
));
292 memmove(dptrs
+ index
, dptrs
+ index
+ 1,
293 (nchildren
- index
- 1) * sizeof(*dptrs
));
296 nilfs_btree_node_set_nchildren(node
, nchildren
);
299 static int nilfs_btree_node_lookup(const struct nilfs_btree_node
*node
,
300 __u64 key
, int *indexp
)
303 int index
, low
, high
, s
;
307 high
= nilfs_btree_node_get_nchildren(node
) - 1;
310 while (low
<= high
) {
311 index
= (low
+ high
) / 2;
312 nkey
= nilfs_btree_node_get_key(node
, index
);
316 } else if (nkey
< key
) {
326 if (nilfs_btree_node_get_level(node
) > NILFS_BTREE_LEVEL_NODE_MIN
) {
327 if (s
> 0 && index
> 0)
339 * nilfs_btree_node_broken - verify consistency of btree node
340 * @node: btree node block to be examined
341 * @size: node size (in bytes)
342 * @blocknr: block number
344 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
346 static int nilfs_btree_node_broken(const struct nilfs_btree_node
*node
,
347 size_t size
, sector_t blocknr
)
349 int level
, flags
, nchildren
;
352 level
= nilfs_btree_node_get_level(node
);
353 flags
= nilfs_btree_node_get_flags(node
);
354 nchildren
= nilfs_btree_node_get_nchildren(node
);
356 if (unlikely(level
< NILFS_BTREE_LEVEL_NODE_MIN
||
357 level
>= NILFS_BTREE_LEVEL_MAX
||
358 (flags
& NILFS_BTREE_NODE_ROOT
) ||
360 nchildren
> NILFS_BTREE_NODE_NCHILDREN_MAX(size
))) {
361 printk(KERN_CRIT
"NILFS: bad btree node (blocknr=%llu): "
362 "level = %d, flags = 0x%x, nchildren = %d\n",
363 (unsigned long long)blocknr
, level
, flags
, nchildren
);
370 * nilfs_btree_root_broken - verify consistency of btree root node
371 * @node: btree root node to be examined
374 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
376 static int nilfs_btree_root_broken(const struct nilfs_btree_node
*node
,
379 int level
, flags
, nchildren
;
382 level
= nilfs_btree_node_get_level(node
);
383 flags
= nilfs_btree_node_get_flags(node
);
384 nchildren
= nilfs_btree_node_get_nchildren(node
);
386 if (unlikely(level
< NILFS_BTREE_LEVEL_NODE_MIN
||
387 level
>= NILFS_BTREE_LEVEL_MAX
||
389 nchildren
> NILFS_BTREE_ROOT_NCHILDREN_MAX
)) {
390 pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
391 ino
, level
, flags
, nchildren
);
397 int nilfs_btree_broken_node_block(struct buffer_head
*bh
)
401 if (buffer_nilfs_checked(bh
))
404 ret
= nilfs_btree_node_broken((struct nilfs_btree_node
*)bh
->b_data
,
405 bh
->b_size
, bh
->b_blocknr
);
407 set_buffer_nilfs_checked(bh
);
411 static struct nilfs_btree_node
*
412 nilfs_btree_get_root(const struct nilfs_bmap
*btree
)
414 return (struct nilfs_btree_node
*)btree
->b_u
.u_data
;
417 static struct nilfs_btree_node
*
418 nilfs_btree_get_nonroot_node(const struct nilfs_btree_path
*path
, int level
)
420 return (struct nilfs_btree_node
*)path
[level
].bp_bh
->b_data
;
423 static struct nilfs_btree_node
*
424 nilfs_btree_get_sib_node(const struct nilfs_btree_path
*path
, int level
)
426 return (struct nilfs_btree_node
*)path
[level
].bp_sib_bh
->b_data
;
/* Tree height = root level + 1 (the data level counts as level 0). */
static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}
434 static struct nilfs_btree_node
*
435 nilfs_btree_get_node(const struct nilfs_bmap
*btree
,
436 const struct nilfs_btree_path
*path
,
437 int level
, int *ncmaxp
)
439 struct nilfs_btree_node
*node
;
441 if (level
== nilfs_btree_height(btree
) - 1) {
442 node
= nilfs_btree_get_root(btree
);
443 *ncmaxp
= NILFS_BTREE_ROOT_NCHILDREN_MAX
;
445 node
= nilfs_btree_get_nonroot_node(path
, level
);
446 *ncmaxp
= nilfs_btree_nchildren_per_block(btree
);
452 nilfs_btree_bad_node(struct nilfs_btree_node
*node
, int level
)
454 if (unlikely(nilfs_btree_node_get_level(node
) != level
)) {
456 printk(KERN_CRIT
"NILFS: btree level mismatch: %d != %d\n",
457 nilfs_btree_node_get_level(node
), level
);
/* Parameters describing sibling read-ahead during a lookup descent. */
struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};
470 static int __nilfs_btree_get_block(const struct nilfs_bmap
*btree
, __u64 ptr
,
471 struct buffer_head
**bhp
,
472 const struct nilfs_btree_readahead_info
*ra
)
474 struct address_space
*btnc
= &NILFS_BMAP_I(btree
)->i_btnode_cache
;
475 struct buffer_head
*bh
, *ra_bh
;
476 sector_t submit_ptr
= 0;
479 ret
= nilfs_btnode_submit_block(btnc
, ptr
, 0, READ
, &bh
, &submit_ptr
);
490 /* read ahead sibling nodes */
491 for (n
= ra
->max_ra_blocks
, i
= ra
->index
+ 1;
492 n
> 0 && i
< ra
->ncmax
; n
--, i
++) {
493 ptr2
= nilfs_btree_node_get_ptr(ra
->node
, i
, ra
->ncmax
);
495 ret
= nilfs_btnode_submit_block(btnc
, ptr2
, 0, READA
,
496 &ra_bh
, &submit_ptr
);
497 if (likely(!ret
|| ret
== -EEXIST
))
499 else if (ret
!= -EBUSY
)
501 if (!buffer_locked(bh
))
509 if (!buffer_uptodate(bh
)) {
515 if (nilfs_btree_broken_node_block(bh
)) {
516 clear_buffer_uptodate(bh
);
525 static int nilfs_btree_get_block(const struct nilfs_bmap
*btree
, __u64 ptr
,
526 struct buffer_head
**bhp
)
528 return __nilfs_btree_get_block(btree
, ptr
, bhp
, NULL
);
531 static int nilfs_btree_do_lookup(const struct nilfs_bmap
*btree
,
532 struct nilfs_btree_path
*path
,
533 __u64 key
, __u64
*ptrp
, int minlevel
,
536 struct nilfs_btree_node
*node
;
537 struct nilfs_btree_readahead_info p
, *ra
;
539 int level
, index
, found
, ncmax
, ret
;
541 node
= nilfs_btree_get_root(btree
);
542 level
= nilfs_btree_node_get_level(node
);
543 if (level
< minlevel
|| nilfs_btree_node_get_nchildren(node
) <= 0)
546 found
= nilfs_btree_node_lookup(node
, key
, &index
);
547 ptr
= nilfs_btree_node_get_ptr(node
, index
,
548 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
549 path
[level
].bp_bh
= NULL
;
550 path
[level
].bp_index
= index
;
552 ncmax
= nilfs_btree_nchildren_per_block(btree
);
554 while (--level
>= minlevel
) {
556 if (level
== NILFS_BTREE_LEVEL_NODE_MIN
&& readahead
) {
557 p
.node
= nilfs_btree_get_node(btree
, path
, level
+ 1,
563 ret
= __nilfs_btree_get_block(btree
, ptr
, &path
[level
].bp_bh
,
568 node
= nilfs_btree_get_nonroot_node(path
, level
);
569 if (nilfs_btree_bad_node(node
, level
))
572 found
= nilfs_btree_node_lookup(node
, key
, &index
);
576 ptr
= nilfs_btree_node_get_ptr(node
, index
, ncmax
);
578 WARN_ON(found
|| level
!= NILFS_BTREE_LEVEL_NODE_MIN
);
580 ptr
= NILFS_BMAP_INVALID_PTR
;
582 path
[level
].bp_index
= index
;
593 static int nilfs_btree_do_lookup_last(const struct nilfs_bmap
*btree
,
594 struct nilfs_btree_path
*path
,
595 __u64
*keyp
, __u64
*ptrp
)
597 struct nilfs_btree_node
*node
;
599 int index
, level
, ncmax
, ret
;
601 node
= nilfs_btree_get_root(btree
);
602 index
= nilfs_btree_node_get_nchildren(node
) - 1;
605 level
= nilfs_btree_node_get_level(node
);
606 ptr
= nilfs_btree_node_get_ptr(node
, index
,
607 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
608 path
[level
].bp_bh
= NULL
;
609 path
[level
].bp_index
= index
;
610 ncmax
= nilfs_btree_nchildren_per_block(btree
);
612 for (level
--; level
> 0; level
--) {
613 ret
= nilfs_btree_get_block(btree
, ptr
, &path
[level
].bp_bh
);
616 node
= nilfs_btree_get_nonroot_node(path
, level
);
617 if (nilfs_btree_bad_node(node
, level
))
619 index
= nilfs_btree_node_get_nchildren(node
) - 1;
620 ptr
= nilfs_btree_node_get_ptr(node
, index
, ncmax
);
621 path
[level
].bp_index
= index
;
625 *keyp
= nilfs_btree_node_get_key(node
, index
);
633 * nilfs_btree_get_next_key - get next valid key from btree path array
634 * @btree: bmap struct of btree
635 * @path: array of nilfs_btree_path struct
636 * @minlevel: start level
637 * @nextkey: place to store the next valid key
639 * Return Value: If a next key was found, 0 is returned. Otherwise,
640 * -ENOENT is returned.
642 static int nilfs_btree_get_next_key(const struct nilfs_bmap
*btree
,
643 const struct nilfs_btree_path
*path
,
644 int minlevel
, __u64
*nextkey
)
646 struct nilfs_btree_node
*node
;
647 int maxlevel
= nilfs_btree_height(btree
) - 1;
648 int index
, next_adj
, level
;
650 /* Next index is already set to bp_index for leaf nodes. */
652 for (level
= minlevel
; level
<= maxlevel
; level
++) {
653 if (level
== maxlevel
)
654 node
= nilfs_btree_get_root(btree
);
656 node
= nilfs_btree_get_nonroot_node(path
, level
);
658 index
= path
[level
].bp_index
+ next_adj
;
659 if (index
< nilfs_btree_node_get_nchildren(node
)) {
660 /* Next key is in this node */
661 *nextkey
= nilfs_btree_node_get_key(node
, index
);
664 /* For non-leaf nodes, next index is stored at bp_index + 1. */
670 static int nilfs_btree_lookup(const struct nilfs_bmap
*btree
,
671 __u64 key
, int level
, __u64
*ptrp
)
673 struct nilfs_btree_path
*path
;
676 path
= nilfs_btree_alloc_path();
680 ret
= nilfs_btree_do_lookup(btree
, path
, key
, ptrp
, level
, 0);
682 nilfs_btree_free_path(path
);
687 static int nilfs_btree_lookup_contig(const struct nilfs_bmap
*btree
,
688 __u64 key
, __u64
*ptrp
, unsigned maxblocks
)
690 struct nilfs_btree_path
*path
;
691 struct nilfs_btree_node
*node
;
692 struct inode
*dat
= NULL
;
695 int level
= NILFS_BTREE_LEVEL_NODE_MIN
;
696 int ret
, cnt
, index
, maxlevel
, ncmax
;
697 struct nilfs_btree_readahead_info p
;
699 path
= nilfs_btree_alloc_path();
703 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
, 1);
707 if (NILFS_BMAP_USE_VBN(btree
)) {
708 dat
= nilfs_bmap_get_dat(btree
);
709 ret
= nilfs_dat_translate(dat
, ptr
, &blocknr
);
715 if (cnt
== maxblocks
)
718 maxlevel
= nilfs_btree_height(btree
) - 1;
719 node
= nilfs_btree_get_node(btree
, path
, level
, &ncmax
);
720 index
= path
[level
].bp_index
+ 1;
722 while (index
< nilfs_btree_node_get_nchildren(node
)) {
723 if (nilfs_btree_node_get_key(node
, index
) !=
726 ptr2
= nilfs_btree_node_get_ptr(node
, index
, ncmax
);
728 ret
= nilfs_dat_translate(dat
, ptr2
, &blocknr
);
733 if (ptr2
!= ptr
+ cnt
|| ++cnt
== maxblocks
)
738 if (level
== maxlevel
)
741 /* look-up right sibling node */
742 p
.node
= nilfs_btree_get_node(btree
, path
, level
+ 1, &p
.ncmax
);
743 p
.index
= path
[level
+ 1].bp_index
+ 1;
745 if (p
.index
>= nilfs_btree_node_get_nchildren(p
.node
) ||
746 nilfs_btree_node_get_key(p
.node
, p
.index
) != key
+ cnt
)
748 ptr2
= nilfs_btree_node_get_ptr(p
.node
, p
.index
, p
.ncmax
);
749 path
[level
+ 1].bp_index
= p
.index
;
751 brelse(path
[level
].bp_bh
);
752 path
[level
].bp_bh
= NULL
;
754 ret
= __nilfs_btree_get_block(btree
, ptr2
, &path
[level
].bp_bh
,
758 node
= nilfs_btree_get_nonroot_node(path
, level
);
759 ncmax
= nilfs_btree_nchildren_per_block(btree
);
761 path
[level
].bp_index
= index
;
767 nilfs_btree_free_path(path
);
771 static void nilfs_btree_promote_key(struct nilfs_bmap
*btree
,
772 struct nilfs_btree_path
*path
,
773 int level
, __u64 key
)
775 if (level
< nilfs_btree_height(btree
) - 1) {
777 nilfs_btree_node_set_key(
778 nilfs_btree_get_nonroot_node(path
, level
),
779 path
[level
].bp_index
, key
);
780 if (!buffer_dirty(path
[level
].bp_bh
))
781 mark_buffer_dirty(path
[level
].bp_bh
);
782 } while ((path
[level
].bp_index
== 0) &&
783 (++level
< nilfs_btree_height(btree
) - 1));
787 if (level
== nilfs_btree_height(btree
) - 1) {
788 nilfs_btree_node_set_key(nilfs_btree_get_root(btree
),
789 path
[level
].bp_index
, key
);
793 static void nilfs_btree_do_insert(struct nilfs_bmap
*btree
,
794 struct nilfs_btree_path
*path
,
795 int level
, __u64
*keyp
, __u64
*ptrp
)
797 struct nilfs_btree_node
*node
;
800 if (level
< nilfs_btree_height(btree
) - 1) {
801 node
= nilfs_btree_get_nonroot_node(path
, level
);
802 ncblk
= nilfs_btree_nchildren_per_block(btree
);
803 nilfs_btree_node_insert(node
, path
[level
].bp_index
,
804 *keyp
, *ptrp
, ncblk
);
805 if (!buffer_dirty(path
[level
].bp_bh
))
806 mark_buffer_dirty(path
[level
].bp_bh
);
808 if (path
[level
].bp_index
== 0)
809 nilfs_btree_promote_key(btree
, path
, level
+ 1,
810 nilfs_btree_node_get_key(node
,
813 node
= nilfs_btree_get_root(btree
);
814 nilfs_btree_node_insert(node
, path
[level
].bp_index
,
816 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
820 static void nilfs_btree_carry_left(struct nilfs_bmap
*btree
,
821 struct nilfs_btree_path
*path
,
822 int level
, __u64
*keyp
, __u64
*ptrp
)
824 struct nilfs_btree_node
*node
, *left
;
825 int nchildren
, lnchildren
, n
, move
, ncblk
;
827 node
= nilfs_btree_get_nonroot_node(path
, level
);
828 left
= nilfs_btree_get_sib_node(path
, level
);
829 nchildren
= nilfs_btree_node_get_nchildren(node
);
830 lnchildren
= nilfs_btree_node_get_nchildren(left
);
831 ncblk
= nilfs_btree_nchildren_per_block(btree
);
834 n
= (nchildren
+ lnchildren
+ 1) / 2 - lnchildren
;
835 if (n
> path
[level
].bp_index
) {
836 /* move insert point */
841 nilfs_btree_node_move_left(left
, node
, n
, ncblk
, ncblk
);
843 if (!buffer_dirty(path
[level
].bp_bh
))
844 mark_buffer_dirty(path
[level
].bp_bh
);
845 if (!buffer_dirty(path
[level
].bp_sib_bh
))
846 mark_buffer_dirty(path
[level
].bp_sib_bh
);
848 nilfs_btree_promote_key(btree
, path
, level
+ 1,
849 nilfs_btree_node_get_key(node
, 0));
852 brelse(path
[level
].bp_bh
);
853 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
854 path
[level
].bp_sib_bh
= NULL
;
855 path
[level
].bp_index
+= lnchildren
;
856 path
[level
+ 1].bp_index
--;
858 brelse(path
[level
].bp_sib_bh
);
859 path
[level
].bp_sib_bh
= NULL
;
860 path
[level
].bp_index
-= n
;
863 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
866 static void nilfs_btree_carry_right(struct nilfs_bmap
*btree
,
867 struct nilfs_btree_path
*path
,
868 int level
, __u64
*keyp
, __u64
*ptrp
)
870 struct nilfs_btree_node
*node
, *right
;
871 int nchildren
, rnchildren
, n
, move
, ncblk
;
873 node
= nilfs_btree_get_nonroot_node(path
, level
);
874 right
= nilfs_btree_get_sib_node(path
, level
);
875 nchildren
= nilfs_btree_node_get_nchildren(node
);
876 rnchildren
= nilfs_btree_node_get_nchildren(right
);
877 ncblk
= nilfs_btree_nchildren_per_block(btree
);
880 n
= (nchildren
+ rnchildren
+ 1) / 2 - rnchildren
;
881 if (n
> nchildren
- path
[level
].bp_index
) {
882 /* move insert point */
887 nilfs_btree_node_move_right(node
, right
, n
, ncblk
, ncblk
);
889 if (!buffer_dirty(path
[level
].bp_bh
))
890 mark_buffer_dirty(path
[level
].bp_bh
);
891 if (!buffer_dirty(path
[level
].bp_sib_bh
))
892 mark_buffer_dirty(path
[level
].bp_sib_bh
);
894 path
[level
+ 1].bp_index
++;
895 nilfs_btree_promote_key(btree
, path
, level
+ 1,
896 nilfs_btree_node_get_key(right
, 0));
897 path
[level
+ 1].bp_index
--;
900 brelse(path
[level
].bp_bh
);
901 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
902 path
[level
].bp_sib_bh
= NULL
;
903 path
[level
].bp_index
-= nilfs_btree_node_get_nchildren(node
);
904 path
[level
+ 1].bp_index
++;
906 brelse(path
[level
].bp_sib_bh
);
907 path
[level
].bp_sib_bh
= NULL
;
910 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
913 static void nilfs_btree_split(struct nilfs_bmap
*btree
,
914 struct nilfs_btree_path
*path
,
915 int level
, __u64
*keyp
, __u64
*ptrp
)
917 struct nilfs_btree_node
*node
, *right
;
918 int nchildren
, n
, move
, ncblk
;
920 node
= nilfs_btree_get_nonroot_node(path
, level
);
921 right
= nilfs_btree_get_sib_node(path
, level
);
922 nchildren
= nilfs_btree_node_get_nchildren(node
);
923 ncblk
= nilfs_btree_nchildren_per_block(btree
);
926 n
= (nchildren
+ 1) / 2;
927 if (n
> nchildren
- path
[level
].bp_index
) {
932 nilfs_btree_node_move_right(node
, right
, n
, ncblk
, ncblk
);
934 if (!buffer_dirty(path
[level
].bp_bh
))
935 mark_buffer_dirty(path
[level
].bp_bh
);
936 if (!buffer_dirty(path
[level
].bp_sib_bh
))
937 mark_buffer_dirty(path
[level
].bp_sib_bh
);
940 path
[level
].bp_index
-= nilfs_btree_node_get_nchildren(node
);
941 nilfs_btree_node_insert(right
, path
[level
].bp_index
,
942 *keyp
, *ptrp
, ncblk
);
944 *keyp
= nilfs_btree_node_get_key(right
, 0);
945 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
947 brelse(path
[level
].bp_bh
);
948 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
949 path
[level
].bp_sib_bh
= NULL
;
951 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
953 *keyp
= nilfs_btree_node_get_key(right
, 0);
954 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
956 brelse(path
[level
].bp_sib_bh
);
957 path
[level
].bp_sib_bh
= NULL
;
960 path
[level
+ 1].bp_index
++;
963 static void nilfs_btree_grow(struct nilfs_bmap
*btree
,
964 struct nilfs_btree_path
*path
,
965 int level
, __u64
*keyp
, __u64
*ptrp
)
967 struct nilfs_btree_node
*root
, *child
;
970 root
= nilfs_btree_get_root(btree
);
971 child
= nilfs_btree_get_sib_node(path
, level
);
972 ncblk
= nilfs_btree_nchildren_per_block(btree
);
974 n
= nilfs_btree_node_get_nchildren(root
);
976 nilfs_btree_node_move_right(root
, child
, n
,
977 NILFS_BTREE_ROOT_NCHILDREN_MAX
, ncblk
);
978 nilfs_btree_node_set_level(root
, level
+ 1);
980 if (!buffer_dirty(path
[level
].bp_sib_bh
))
981 mark_buffer_dirty(path
[level
].bp_sib_bh
);
983 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
984 path
[level
].bp_sib_bh
= NULL
;
986 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
988 *keyp
= nilfs_btree_node_get_key(child
, 0);
989 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
992 static __u64
nilfs_btree_find_near(const struct nilfs_bmap
*btree
,
993 const struct nilfs_btree_path
*path
)
995 struct nilfs_btree_node
*node
;
999 return NILFS_BMAP_INVALID_PTR
;
1002 level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1003 if (path
[level
].bp_index
> 0) {
1004 node
= nilfs_btree_get_node(btree
, path
, level
, &ncmax
);
1005 return nilfs_btree_node_get_ptr(node
,
1006 path
[level
].bp_index
- 1,
1011 level
= NILFS_BTREE_LEVEL_NODE_MIN
+ 1;
1012 if (level
<= nilfs_btree_height(btree
) - 1) {
1013 node
= nilfs_btree_get_node(btree
, path
, level
, &ncmax
);
1014 return nilfs_btree_node_get_ptr(node
, path
[level
].bp_index
,
1018 return NILFS_BMAP_INVALID_PTR
;
1021 static __u64
nilfs_btree_find_target_v(const struct nilfs_bmap
*btree
,
1022 const struct nilfs_btree_path
*path
,
1027 ptr
= nilfs_bmap_find_target_seq(btree
, key
);
1028 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
1029 /* sequential access */
1032 ptr
= nilfs_btree_find_near(btree
, path
);
1033 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
1038 return nilfs_bmap_find_target_in_group(btree
);
1041 static int nilfs_btree_prepare_insert(struct nilfs_bmap
*btree
,
1042 struct nilfs_btree_path
*path
,
1043 int *levelp
, __u64 key
, __u64 ptr
,
1044 struct nilfs_bmap_stats
*stats
)
1046 struct buffer_head
*bh
;
1047 struct nilfs_btree_node
*node
, *parent
, *sib
;
1049 int pindex
, level
, ncmax
, ncblk
, ret
;
1050 struct inode
*dat
= NULL
;
1052 stats
->bs_nblocks
= 0;
1053 level
= NILFS_BTREE_LEVEL_DATA
;
1055 /* allocate a new ptr for data block */
1056 if (NILFS_BMAP_USE_VBN(btree
)) {
1057 path
[level
].bp_newreq
.bpr_ptr
=
1058 nilfs_btree_find_target_v(btree
, path
, key
);
1059 dat
= nilfs_bmap_get_dat(btree
);
1062 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, &path
[level
].bp_newreq
, dat
);
1066 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1068 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1069 level
< nilfs_btree_height(btree
) - 1;
1071 node
= nilfs_btree_get_nonroot_node(path
, level
);
1072 if (nilfs_btree_node_get_nchildren(node
) < ncblk
) {
1073 path
[level
].bp_op
= nilfs_btree_do_insert
;
1074 stats
->bs_nblocks
++;
1078 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1079 pindex
= path
[level
+ 1].bp_index
;
1083 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
- 1,
1085 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1087 goto err_out_child_node
;
1088 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1089 if (nilfs_btree_node_get_nchildren(sib
) < ncblk
) {
1090 path
[level
].bp_sib_bh
= bh
;
1091 path
[level
].bp_op
= nilfs_btree_carry_left
;
1092 stats
->bs_nblocks
++;
1100 if (pindex
< nilfs_btree_node_get_nchildren(parent
) - 1) {
1101 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
+ 1,
1103 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1105 goto err_out_child_node
;
1106 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1107 if (nilfs_btree_node_get_nchildren(sib
) < ncblk
) {
1108 path
[level
].bp_sib_bh
= bh
;
1109 path
[level
].bp_op
= nilfs_btree_carry_right
;
1110 stats
->bs_nblocks
++;
1118 path
[level
].bp_newreq
.bpr_ptr
=
1119 path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
1120 ret
= nilfs_bmap_prepare_alloc_ptr(btree
,
1121 &path
[level
].bp_newreq
, dat
);
1123 goto err_out_child_node
;
1124 ret
= nilfs_btree_get_new_block(btree
,
1125 path
[level
].bp_newreq
.bpr_ptr
,
1128 goto err_out_curr_node
;
1130 stats
->bs_nblocks
++;
1132 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1133 nilfs_btree_node_init(sib
, 0, level
, 0, ncblk
, NULL
, NULL
);
1134 path
[level
].bp_sib_bh
= bh
;
1135 path
[level
].bp_op
= nilfs_btree_split
;
1139 node
= nilfs_btree_get_root(btree
);
1140 if (nilfs_btree_node_get_nchildren(node
) <
1141 NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1142 path
[level
].bp_op
= nilfs_btree_do_insert
;
1143 stats
->bs_nblocks
++;
1148 path
[level
].bp_newreq
.bpr_ptr
= path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
1149 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, &path
[level
].bp_newreq
, dat
);
1151 goto err_out_child_node
;
1152 ret
= nilfs_btree_get_new_block(btree
, path
[level
].bp_newreq
.bpr_ptr
,
1155 goto err_out_curr_node
;
1157 nilfs_btree_node_init((struct nilfs_btree_node
*)bh
->b_data
,
1158 0, level
, 0, ncblk
, NULL
, NULL
);
1159 path
[level
].bp_sib_bh
= bh
;
1160 path
[level
].bp_op
= nilfs_btree_grow
;
1163 path
[level
].bp_op
= nilfs_btree_do_insert
;
1165 /* a newly-created node block and a data block are added */
1166 stats
->bs_nblocks
+= 2;
1175 nilfs_bmap_abort_alloc_ptr(btree
, &path
[level
].bp_newreq
, dat
);
1177 for (level
--; level
> NILFS_BTREE_LEVEL_DATA
; level
--) {
1178 nilfs_btnode_delete(path
[level
].bp_sib_bh
);
1179 nilfs_bmap_abort_alloc_ptr(btree
, &path
[level
].bp_newreq
, dat
);
1183 nilfs_bmap_abort_alloc_ptr(btree
, &path
[level
].bp_newreq
, dat
);
1186 stats
->bs_nblocks
= 0;
1190 static void nilfs_btree_commit_insert(struct nilfs_bmap
*btree
,
1191 struct nilfs_btree_path
*path
,
1192 int maxlevel
, __u64 key
, __u64 ptr
)
1194 struct inode
*dat
= NULL
;
1197 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1198 ptr
= path
[NILFS_BTREE_LEVEL_DATA
].bp_newreq
.bpr_ptr
;
1199 if (NILFS_BMAP_USE_VBN(btree
)) {
1200 nilfs_bmap_set_target_v(btree
, key
, ptr
);
1201 dat
= nilfs_bmap_get_dat(btree
);
1204 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1205 nilfs_bmap_commit_alloc_ptr(btree
,
1206 &path
[level
- 1].bp_newreq
, dat
);
1207 path
[level
].bp_op(btree
, path
, level
, &key
, &ptr
);
1210 if (!nilfs_bmap_dirty(btree
))
1211 nilfs_bmap_set_dirty(btree
);
1214 static int nilfs_btree_insert(struct nilfs_bmap
*btree
, __u64 key
, __u64 ptr
)
1216 struct nilfs_btree_path
*path
;
1217 struct nilfs_bmap_stats stats
;
1220 path
= nilfs_btree_alloc_path();
1224 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1225 NILFS_BTREE_LEVEL_NODE_MIN
, 0);
1226 if (ret
!= -ENOENT
) {
1232 ret
= nilfs_btree_prepare_insert(btree
, path
, &level
, key
, ptr
, &stats
);
1235 nilfs_btree_commit_insert(btree
, path
, level
, key
, ptr
);
1236 nilfs_inode_add_blocks(btree
->b_inode
, stats
.bs_nblocks
);
1239 nilfs_btree_free_path(path
);
1243 static void nilfs_btree_do_delete(struct nilfs_bmap
*btree
,
1244 struct nilfs_btree_path
*path
,
1245 int level
, __u64
*keyp
, __u64
*ptrp
)
1247 struct nilfs_btree_node
*node
;
1250 if (level
< nilfs_btree_height(btree
) - 1) {
1251 node
= nilfs_btree_get_nonroot_node(path
, level
);
1252 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1253 nilfs_btree_node_delete(node
, path
[level
].bp_index
,
1255 if (!buffer_dirty(path
[level
].bp_bh
))
1256 mark_buffer_dirty(path
[level
].bp_bh
);
1257 if (path
[level
].bp_index
== 0)
1258 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1259 nilfs_btree_node_get_key(node
, 0));
1261 node
= nilfs_btree_get_root(btree
);
1262 nilfs_btree_node_delete(node
, path
[level
].bp_index
,
1264 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1268 static void nilfs_btree_borrow_left(struct nilfs_bmap
*btree
,
1269 struct nilfs_btree_path
*path
,
1270 int level
, __u64
*keyp
, __u64
*ptrp
)
1272 struct nilfs_btree_node
*node
, *left
;
1273 int nchildren
, lnchildren
, n
, ncblk
;
1275 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1277 node
= nilfs_btree_get_nonroot_node(path
, level
);
1278 left
= nilfs_btree_get_sib_node(path
, level
);
1279 nchildren
= nilfs_btree_node_get_nchildren(node
);
1280 lnchildren
= nilfs_btree_node_get_nchildren(left
);
1281 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1283 n
= (nchildren
+ lnchildren
) / 2 - nchildren
;
1285 nilfs_btree_node_move_right(left
, node
, n
, ncblk
, ncblk
);
1287 if (!buffer_dirty(path
[level
].bp_bh
))
1288 mark_buffer_dirty(path
[level
].bp_bh
);
1289 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1290 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1292 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1293 nilfs_btree_node_get_key(node
, 0));
1295 brelse(path
[level
].bp_sib_bh
);
1296 path
[level
].bp_sib_bh
= NULL
;
1297 path
[level
].bp_index
+= n
;
1300 static void nilfs_btree_borrow_right(struct nilfs_bmap
*btree
,
1301 struct nilfs_btree_path
*path
,
1302 int level
, __u64
*keyp
, __u64
*ptrp
)
1304 struct nilfs_btree_node
*node
, *right
;
1305 int nchildren
, rnchildren
, n
, ncblk
;
1307 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1309 node
= nilfs_btree_get_nonroot_node(path
, level
);
1310 right
= nilfs_btree_get_sib_node(path
, level
);
1311 nchildren
= nilfs_btree_node_get_nchildren(node
);
1312 rnchildren
= nilfs_btree_node_get_nchildren(right
);
1313 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1315 n
= (nchildren
+ rnchildren
) / 2 - nchildren
;
1317 nilfs_btree_node_move_left(node
, right
, n
, ncblk
, ncblk
);
1319 if (!buffer_dirty(path
[level
].bp_bh
))
1320 mark_buffer_dirty(path
[level
].bp_bh
);
1321 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1322 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1324 path
[level
+ 1].bp_index
++;
1325 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1326 nilfs_btree_node_get_key(right
, 0));
1327 path
[level
+ 1].bp_index
--;
1329 brelse(path
[level
].bp_sib_bh
);
1330 path
[level
].bp_sib_bh
= NULL
;
1333 static void nilfs_btree_concat_left(struct nilfs_bmap
*btree
,
1334 struct nilfs_btree_path
*path
,
1335 int level
, __u64
*keyp
, __u64
*ptrp
)
1337 struct nilfs_btree_node
*node
, *left
;
1340 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1342 node
= nilfs_btree_get_nonroot_node(path
, level
);
1343 left
= nilfs_btree_get_sib_node(path
, level
);
1344 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1346 n
= nilfs_btree_node_get_nchildren(node
);
1348 nilfs_btree_node_move_left(left
, node
, n
, ncblk
, ncblk
);
1350 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1351 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1353 nilfs_btnode_delete(path
[level
].bp_bh
);
1354 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
1355 path
[level
].bp_sib_bh
= NULL
;
1356 path
[level
].bp_index
+= nilfs_btree_node_get_nchildren(left
);
1359 static void nilfs_btree_concat_right(struct nilfs_bmap
*btree
,
1360 struct nilfs_btree_path
*path
,
1361 int level
, __u64
*keyp
, __u64
*ptrp
)
1363 struct nilfs_btree_node
*node
, *right
;
1366 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1368 node
= nilfs_btree_get_nonroot_node(path
, level
);
1369 right
= nilfs_btree_get_sib_node(path
, level
);
1370 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1372 n
= nilfs_btree_node_get_nchildren(right
);
1374 nilfs_btree_node_move_left(node
, right
, n
, ncblk
, ncblk
);
1376 if (!buffer_dirty(path
[level
].bp_bh
))
1377 mark_buffer_dirty(path
[level
].bp_bh
);
1379 nilfs_btnode_delete(path
[level
].bp_sib_bh
);
1380 path
[level
].bp_sib_bh
= NULL
;
1381 path
[level
+ 1].bp_index
++;
1384 static void nilfs_btree_shrink(struct nilfs_bmap
*btree
,
1385 struct nilfs_btree_path
*path
,
1386 int level
, __u64
*keyp
, __u64
*ptrp
)
1388 struct nilfs_btree_node
*root
, *child
;
1391 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1393 root
= nilfs_btree_get_root(btree
);
1394 child
= nilfs_btree_get_nonroot_node(path
, level
);
1395 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1397 nilfs_btree_node_delete(root
, 0, NULL
, NULL
,
1398 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1399 nilfs_btree_node_set_level(root
, level
);
1400 n
= nilfs_btree_node_get_nchildren(child
);
1401 nilfs_btree_node_move_left(root
, child
, n
,
1402 NILFS_BTREE_ROOT_NCHILDREN_MAX
, ncblk
);
1404 nilfs_btnode_delete(path
[level
].bp_bh
);
1405 path
[level
].bp_bh
= NULL
;
1408 static void nilfs_btree_nop(struct nilfs_bmap
*btree
,
1409 struct nilfs_btree_path
*path
,
1410 int level
, __u64
*keyp
, __u64
*ptrp
)
1414 static int nilfs_btree_prepare_delete(struct nilfs_bmap
*btree
,
1415 struct nilfs_btree_path
*path
,
1417 struct nilfs_bmap_stats
*stats
,
1420 struct buffer_head
*bh
;
1421 struct nilfs_btree_node
*node
, *parent
, *sib
;
1423 int pindex
, dindex
, level
, ncmin
, ncmax
, ncblk
, ret
;
1426 stats
->bs_nblocks
= 0;
1427 ncmin
= NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree
));
1428 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1430 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
, dindex
= path
[level
].bp_index
;
1431 level
< nilfs_btree_height(btree
) - 1;
1433 node
= nilfs_btree_get_nonroot_node(path
, level
);
1434 path
[level
].bp_oldreq
.bpr_ptr
=
1435 nilfs_btree_node_get_ptr(node
, dindex
, ncblk
);
1436 ret
= nilfs_bmap_prepare_end_ptr(btree
,
1437 &path
[level
].bp_oldreq
, dat
);
1439 goto err_out_child_node
;
1441 if (nilfs_btree_node_get_nchildren(node
) > ncmin
) {
1442 path
[level
].bp_op
= nilfs_btree_do_delete
;
1443 stats
->bs_nblocks
++;
1447 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1448 pindex
= path
[level
+ 1].bp_index
;
1453 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
- 1,
1455 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1457 goto err_out_curr_node
;
1458 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1459 if (nilfs_btree_node_get_nchildren(sib
) > ncmin
) {
1460 path
[level
].bp_sib_bh
= bh
;
1461 path
[level
].bp_op
= nilfs_btree_borrow_left
;
1462 stats
->bs_nblocks
++;
1465 path
[level
].bp_sib_bh
= bh
;
1466 path
[level
].bp_op
= nilfs_btree_concat_left
;
1467 stats
->bs_nblocks
++;
1471 nilfs_btree_node_get_nchildren(parent
) - 1) {
1473 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
+ 1,
1475 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1477 goto err_out_curr_node
;
1478 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1479 if (nilfs_btree_node_get_nchildren(sib
) > ncmin
) {
1480 path
[level
].bp_sib_bh
= bh
;
1481 path
[level
].bp_op
= nilfs_btree_borrow_right
;
1482 stats
->bs_nblocks
++;
1485 path
[level
].bp_sib_bh
= bh
;
1486 path
[level
].bp_op
= nilfs_btree_concat_right
;
1487 stats
->bs_nblocks
++;
1489 * When merging right sibling node
1490 * into the current node, pointer to
1491 * the right sibling node must be
1492 * terminated instead. The adjustment
1493 * below is required for that.
1495 dindex
= pindex
+ 1;
1500 /* the only child of the root node */
1501 WARN_ON(level
!= nilfs_btree_height(btree
) - 2);
1502 if (nilfs_btree_node_get_nchildren(node
) - 1 <=
1503 NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1504 path
[level
].bp_op
= nilfs_btree_shrink
;
1505 stats
->bs_nblocks
+= 2;
1507 path
[level
].bp_op
= nilfs_btree_nop
;
1508 goto shrink_root_child
;
1510 path
[level
].bp_op
= nilfs_btree_do_delete
;
1511 stats
->bs_nblocks
++;
1517 /* child of the root node is deleted */
1518 path
[level
].bp_op
= nilfs_btree_do_delete
;
1519 stats
->bs_nblocks
++;
1522 node
= nilfs_btree_get_root(btree
);
1523 path
[level
].bp_oldreq
.bpr_ptr
=
1524 nilfs_btree_node_get_ptr(node
, dindex
,
1525 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1527 ret
= nilfs_bmap_prepare_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1529 goto err_out_child_node
;
1538 nilfs_bmap_abort_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1540 for (level
--; level
>= NILFS_BTREE_LEVEL_NODE_MIN
; level
--) {
1541 brelse(path
[level
].bp_sib_bh
);
1542 nilfs_bmap_abort_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1545 stats
->bs_nblocks
= 0;
1549 static void nilfs_btree_commit_delete(struct nilfs_bmap
*btree
,
1550 struct nilfs_btree_path
*path
,
1551 int maxlevel
, struct inode
*dat
)
1555 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1556 nilfs_bmap_commit_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1557 path
[level
].bp_op(btree
, path
, level
, NULL
, NULL
);
1560 if (!nilfs_bmap_dirty(btree
))
1561 nilfs_bmap_set_dirty(btree
);
1564 static int nilfs_btree_delete(struct nilfs_bmap
*btree
, __u64 key
)
1567 struct nilfs_btree_path
*path
;
1568 struct nilfs_bmap_stats stats
;
1572 path
= nilfs_btree_alloc_path();
1576 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1577 NILFS_BTREE_LEVEL_NODE_MIN
, 0);
1582 dat
= NILFS_BMAP_USE_VBN(btree
) ? nilfs_bmap_get_dat(btree
) : NULL
;
1584 ret
= nilfs_btree_prepare_delete(btree
, path
, &level
, &stats
, dat
);
1587 nilfs_btree_commit_delete(btree
, path
, level
, dat
);
1588 nilfs_inode_sub_blocks(btree
->b_inode
, stats
.bs_nblocks
);
1591 nilfs_btree_free_path(path
);
1595 static int nilfs_btree_seek_key(const struct nilfs_bmap
*btree
, __u64 start
,
1598 struct nilfs_btree_path
*path
;
1599 const int minlevel
= NILFS_BTREE_LEVEL_NODE_MIN
;
1602 path
= nilfs_btree_alloc_path();
1606 ret
= nilfs_btree_do_lookup(btree
, path
, start
, NULL
, minlevel
, 0);
1609 else if (ret
== -ENOENT
)
1610 ret
= nilfs_btree_get_next_key(btree
, path
, minlevel
, keyp
);
1612 nilfs_btree_free_path(path
);
1616 static int nilfs_btree_last_key(const struct nilfs_bmap
*btree
, __u64
*keyp
)
1618 struct nilfs_btree_path
*path
;
1621 path
= nilfs_btree_alloc_path();
1625 ret
= nilfs_btree_do_lookup_last(btree
, path
, keyp
, NULL
);
1627 nilfs_btree_free_path(path
);
1632 static int nilfs_btree_check_delete(struct nilfs_bmap
*btree
, __u64 key
)
1634 struct buffer_head
*bh
;
1635 struct nilfs_btree_node
*root
, *node
;
1636 __u64 maxkey
, nextmaxkey
;
1640 root
= nilfs_btree_get_root(btree
);
1641 switch (nilfs_btree_height(btree
)) {
1647 nchildren
= nilfs_btree_node_get_nchildren(root
);
1650 ptr
= nilfs_btree_node_get_ptr(root
, nchildren
- 1,
1651 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1652 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1655 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1661 nchildren
= nilfs_btree_node_get_nchildren(node
);
1662 maxkey
= nilfs_btree_node_get_key(node
, nchildren
- 1);
1663 nextmaxkey
= (nchildren
> 1) ?
1664 nilfs_btree_node_get_key(node
, nchildren
- 2) : 0;
1668 return (maxkey
== key
) && (nextmaxkey
< NILFS_BMAP_LARGE_LOW
);
1671 static int nilfs_btree_gather_data(struct nilfs_bmap
*btree
,
1672 __u64
*keys
, __u64
*ptrs
, int nitems
)
1674 struct buffer_head
*bh
;
1675 struct nilfs_btree_node
*node
, *root
;
1679 int nchildren
, ncmax
, i
, ret
;
1681 root
= nilfs_btree_get_root(btree
);
1682 switch (nilfs_btree_height(btree
)) {
1686 ncmax
= NILFS_BTREE_ROOT_NCHILDREN_MAX
;
1689 nchildren
= nilfs_btree_node_get_nchildren(root
);
1690 WARN_ON(nchildren
> 1);
1691 ptr
= nilfs_btree_node_get_ptr(root
, nchildren
- 1,
1692 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1693 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1696 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1697 ncmax
= nilfs_btree_nchildren_per_block(btree
);
1704 nchildren
= nilfs_btree_node_get_nchildren(node
);
1705 if (nchildren
< nitems
)
1707 dkeys
= nilfs_btree_node_dkeys(node
);
1708 dptrs
= nilfs_btree_node_dptrs(node
, ncmax
);
1709 for (i
= 0; i
< nitems
; i
++) {
1710 keys
[i
] = le64_to_cpu(dkeys
[i
]);
1711 ptrs
[i
] = le64_to_cpu(dptrs
[i
]);
1721 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap
*btree
, __u64 key
,
1722 union nilfs_bmap_ptr_req
*dreq
,
1723 union nilfs_bmap_ptr_req
*nreq
,
1724 struct buffer_head
**bhp
,
1725 struct nilfs_bmap_stats
*stats
)
1727 struct buffer_head
*bh
;
1728 struct inode
*dat
= NULL
;
1731 stats
->bs_nblocks
= 0;
1734 /* cannot find near ptr */
1735 if (NILFS_BMAP_USE_VBN(btree
)) {
1736 dreq
->bpr_ptr
= nilfs_btree_find_target_v(btree
, NULL
, key
);
1737 dat
= nilfs_bmap_get_dat(btree
);
1740 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, dreq
, dat
);
1745 stats
->bs_nblocks
++;
1747 nreq
->bpr_ptr
= dreq
->bpr_ptr
+ 1;
1748 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, nreq
, dat
);
1752 ret
= nilfs_btree_get_new_block(btree
, nreq
->bpr_ptr
, &bh
);
1757 stats
->bs_nblocks
++;
1765 nilfs_bmap_abort_alloc_ptr(btree
, nreq
, dat
);
1767 nilfs_bmap_abort_alloc_ptr(btree
, dreq
, dat
);
1768 stats
->bs_nblocks
= 0;
1774 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap
*btree
,
1775 __u64 key
, __u64 ptr
,
1776 const __u64
*keys
, const __u64
*ptrs
,
1778 union nilfs_bmap_ptr_req
*dreq
,
1779 union nilfs_bmap_ptr_req
*nreq
,
1780 struct buffer_head
*bh
)
1782 struct nilfs_btree_node
*node
;
1787 /* free resources */
1788 if (btree
->b_ops
->bop_clear
!= NULL
)
1789 btree
->b_ops
->bop_clear(btree
);
1791 /* ptr must be a pointer to a buffer head. */
1792 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1794 /* convert and insert */
1795 dat
= NILFS_BMAP_USE_VBN(btree
) ? nilfs_bmap_get_dat(btree
) : NULL
;
1796 __nilfs_btree_init(btree
);
1798 nilfs_bmap_commit_alloc_ptr(btree
, dreq
, dat
);
1799 nilfs_bmap_commit_alloc_ptr(btree
, nreq
, dat
);
1801 /* create child node at level 1 */
1802 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1803 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1804 nilfs_btree_node_init(node
, 0, 1, n
, ncblk
, keys
, ptrs
);
1805 nilfs_btree_node_insert(node
, n
, key
, dreq
->bpr_ptr
, ncblk
);
1806 if (!buffer_dirty(bh
))
1807 mark_buffer_dirty(bh
);
1808 if (!nilfs_bmap_dirty(btree
))
1809 nilfs_bmap_set_dirty(btree
);
1813 /* create root node at level 2 */
1814 node
= nilfs_btree_get_root(btree
);
1815 tmpptr
= nreq
->bpr_ptr
;
1816 nilfs_btree_node_init(node
, NILFS_BTREE_NODE_ROOT
, 2, 1,
1817 NILFS_BTREE_ROOT_NCHILDREN_MAX
,
1820 nilfs_bmap_commit_alloc_ptr(btree
, dreq
, dat
);
1822 /* create root node at level 1 */
1823 node
= nilfs_btree_get_root(btree
);
1824 nilfs_btree_node_init(node
, NILFS_BTREE_NODE_ROOT
, 1, n
,
1825 NILFS_BTREE_ROOT_NCHILDREN_MAX
,
1827 nilfs_btree_node_insert(node
, n
, key
, dreq
->bpr_ptr
,
1828 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1829 if (!nilfs_bmap_dirty(btree
))
1830 nilfs_bmap_set_dirty(btree
);
1833 if (NILFS_BMAP_USE_VBN(btree
))
1834 nilfs_bmap_set_target_v(btree
, key
, dreq
->bpr_ptr
);
1838 * nilfs_btree_convert_and_insert -
1846 int nilfs_btree_convert_and_insert(struct nilfs_bmap
*btree
,
1847 __u64 key
, __u64 ptr
,
1848 const __u64
*keys
, const __u64
*ptrs
, int n
)
1850 struct buffer_head
*bh
= NULL
;
1851 union nilfs_bmap_ptr_req dreq
, nreq
, *di
, *ni
;
1852 struct nilfs_bmap_stats stats
;
1855 if (n
+ 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1858 } else if ((n
+ 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1859 1 << btree
->b_inode
->i_blkbits
)) {
1868 ret
= nilfs_btree_prepare_convert_and_insert(btree
, key
, di
, ni
, &bh
,
1872 nilfs_btree_commit_convert_and_insert(btree
, key
, ptr
, keys
, ptrs
, n
,
1874 nilfs_inode_add_blocks(btree
->b_inode
, stats
.bs_nblocks
);
1878 static int nilfs_btree_propagate_p(struct nilfs_bmap
*btree
,
1879 struct nilfs_btree_path
*path
,
1881 struct buffer_head
*bh
)
1883 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1884 !buffer_dirty(path
[level
].bp_bh
))
1885 mark_buffer_dirty(path
[level
].bp_bh
);
1890 static int nilfs_btree_prepare_update_v(struct nilfs_bmap
*btree
,
1891 struct nilfs_btree_path
*path
,
1892 int level
, struct inode
*dat
)
1894 struct nilfs_btree_node
*parent
;
1897 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1898 path
[level
].bp_oldreq
.bpr_ptr
=
1899 nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
1901 path
[level
].bp_newreq
.bpr_ptr
= path
[level
].bp_oldreq
.bpr_ptr
+ 1;
1902 ret
= nilfs_dat_prepare_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1903 &path
[level
].bp_newreq
.bpr_req
);
1907 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1908 path
[level
].bp_ctxt
.oldkey
= path
[level
].bp_oldreq
.bpr_ptr
;
1909 path
[level
].bp_ctxt
.newkey
= path
[level
].bp_newreq
.bpr_ptr
;
1910 path
[level
].bp_ctxt
.bh
= path
[level
].bp_bh
;
1911 ret
= nilfs_btnode_prepare_change_key(
1912 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1913 &path
[level
].bp_ctxt
);
1915 nilfs_dat_abort_update(dat
,
1916 &path
[level
].bp_oldreq
.bpr_req
,
1917 &path
[level
].bp_newreq
.bpr_req
);
1925 static void nilfs_btree_commit_update_v(struct nilfs_bmap
*btree
,
1926 struct nilfs_btree_path
*path
,
1927 int level
, struct inode
*dat
)
1929 struct nilfs_btree_node
*parent
;
1932 nilfs_dat_commit_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1933 &path
[level
].bp_newreq
.bpr_req
,
1934 btree
->b_ptr_type
== NILFS_BMAP_PTR_VS
);
1936 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1937 nilfs_btnode_commit_change_key(
1938 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1939 &path
[level
].bp_ctxt
);
1940 path
[level
].bp_bh
= path
[level
].bp_ctxt
.bh
;
1942 set_buffer_nilfs_volatile(path
[level
].bp_bh
);
1944 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1945 nilfs_btree_node_set_ptr(parent
, path
[level
+ 1].bp_index
,
1946 path
[level
].bp_newreq
.bpr_ptr
, ncmax
);
1949 static void nilfs_btree_abort_update_v(struct nilfs_bmap
*btree
,
1950 struct nilfs_btree_path
*path
,
1951 int level
, struct inode
*dat
)
1953 nilfs_dat_abort_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1954 &path
[level
].bp_newreq
.bpr_req
);
1955 if (buffer_nilfs_node(path
[level
].bp_bh
))
1956 nilfs_btnode_abort_change_key(
1957 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1958 &path
[level
].bp_ctxt
);
1961 static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap
*btree
,
1962 struct nilfs_btree_path
*path
,
1963 int minlevel
, int *maxlevelp
,
1969 if (!buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1970 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1974 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1975 !buffer_dirty(path
[level
].bp_bh
)) {
1977 WARN_ON(buffer_nilfs_volatile(path
[level
].bp_bh
));
1978 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1984 *maxlevelp
= level
- 1;
1989 while (--level
> minlevel
)
1990 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
1991 if (!buffer_nilfs_volatile(path
[level
].bp_bh
))
1992 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
1996 static void nilfs_btree_commit_propagate_v(struct nilfs_bmap
*btree
,
1997 struct nilfs_btree_path
*path
,
1998 int minlevel
, int maxlevel
,
1999 struct buffer_head
*bh
,
2004 if (!buffer_nilfs_volatile(path
[minlevel
].bp_bh
))
2005 nilfs_btree_commit_update_v(btree
, path
, minlevel
, dat
);
2007 for (level
= minlevel
+ 1; level
<= maxlevel
; level
++)
2008 nilfs_btree_commit_update_v(btree
, path
, level
, dat
);
2011 static int nilfs_btree_propagate_v(struct nilfs_bmap
*btree
,
2012 struct nilfs_btree_path
*path
,
2013 int level
, struct buffer_head
*bh
)
2015 int maxlevel
= 0, ret
;
2016 struct nilfs_btree_node
*parent
;
2017 struct inode
*dat
= nilfs_bmap_get_dat(btree
);
2022 path
[level
].bp_bh
= bh
;
2023 ret
= nilfs_btree_prepare_propagate_v(btree
, path
, level
, &maxlevel
,
2028 if (buffer_nilfs_volatile(path
[level
].bp_bh
)) {
2029 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2030 ptr
= nilfs_btree_node_get_ptr(parent
,
2031 path
[level
+ 1].bp_index
,
2033 ret
= nilfs_dat_mark_dirty(dat
, ptr
);
2038 nilfs_btree_commit_propagate_v(btree
, path
, level
, maxlevel
, bh
, dat
);
2041 brelse(path
[level
].bp_bh
);
2042 path
[level
].bp_bh
= NULL
;
2046 static int nilfs_btree_propagate(struct nilfs_bmap
*btree
,
2047 struct buffer_head
*bh
)
2049 struct nilfs_btree_path
*path
;
2050 struct nilfs_btree_node
*node
;
2054 WARN_ON(!buffer_dirty(bh
));
2056 path
= nilfs_btree_alloc_path();
2060 if (buffer_nilfs_node(bh
)) {
2061 node
= (struct nilfs_btree_node
*)bh
->b_data
;
2062 key
= nilfs_btree_node_get_key(node
, 0);
2063 level
= nilfs_btree_node_get_level(node
);
2065 key
= nilfs_bmap_data_get_key(btree
, bh
);
2066 level
= NILFS_BTREE_LEVEL_DATA
;
2069 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1, 0);
2071 if (unlikely(ret
== -ENOENT
))
2072 printk(KERN_CRIT
"%s: key = %llu, level == %d\n",
2073 __func__
, (unsigned long long)key
, level
);
2077 ret
= NILFS_BMAP_USE_VBN(btree
) ?
2078 nilfs_btree_propagate_v(btree
, path
, level
, bh
) :
2079 nilfs_btree_propagate_p(btree
, path
, level
, bh
);
2082 nilfs_btree_free_path(path
);
2087 static int nilfs_btree_propagate_gc(struct nilfs_bmap
*btree
,
2088 struct buffer_head
*bh
)
2090 return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree
), bh
->b_blocknr
);
2093 static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap
*btree
,
2094 struct list_head
*lists
,
2095 struct buffer_head
*bh
)
2097 struct list_head
*head
;
2098 struct buffer_head
*cbh
;
2099 struct nilfs_btree_node
*node
, *cnode
;
2104 node
= (struct nilfs_btree_node
*)bh
->b_data
;
2105 key
= nilfs_btree_node_get_key(node
, 0);
2106 level
= nilfs_btree_node_get_level(node
);
2107 if (level
< NILFS_BTREE_LEVEL_NODE_MIN
||
2108 level
>= NILFS_BTREE_LEVEL_MAX
) {
2111 "%s: invalid btree level: %d (key=%llu, ino=%lu, "
2113 __func__
, level
, (unsigned long long)key
,
2114 NILFS_BMAP_I(btree
)->vfs_inode
.i_ino
,
2115 (unsigned long long)bh
->b_blocknr
);
2119 list_for_each(head
, &lists
[level
]) {
2120 cbh
= list_entry(head
, struct buffer_head
, b_assoc_buffers
);
2121 cnode
= (struct nilfs_btree_node
*)cbh
->b_data
;
2122 ckey
= nilfs_btree_node_get_key(cnode
, 0);
2126 list_add_tail(&bh
->b_assoc_buffers
, head
);
2129 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap
*btree
,
2130 struct list_head
*listp
)
2132 struct address_space
*btcache
= &NILFS_BMAP_I(btree
)->i_btnode_cache
;
2133 struct list_head lists
[NILFS_BTREE_LEVEL_MAX
];
2134 struct pagevec pvec
;
2135 struct buffer_head
*bh
, *head
;
2139 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2140 level
< NILFS_BTREE_LEVEL_MAX
;
2142 INIT_LIST_HEAD(&lists
[level
]);
2144 pagevec_init(&pvec
, 0);
2146 while (pagevec_lookup_tag(&pvec
, btcache
, &index
, PAGECACHE_TAG_DIRTY
,
2148 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
2149 bh
= head
= page_buffers(pvec
.pages
[i
]);
2151 if (buffer_dirty(bh
))
2152 nilfs_btree_add_dirty_buffer(btree
,
2154 } while ((bh
= bh
->b_this_page
) != head
);
2156 pagevec_release(&pvec
);
2160 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2161 level
< NILFS_BTREE_LEVEL_MAX
;
2163 list_splice_tail(&lists
[level
], listp
);
2166 static int nilfs_btree_assign_p(struct nilfs_bmap
*btree
,
2167 struct nilfs_btree_path
*path
,
2169 struct buffer_head
**bh
,
2171 union nilfs_binfo
*binfo
)
2173 struct nilfs_btree_node
*parent
;
2178 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2179 ptr
= nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
2181 if (buffer_nilfs_node(*bh
)) {
2182 path
[level
].bp_ctxt
.oldkey
= ptr
;
2183 path
[level
].bp_ctxt
.newkey
= blocknr
;
2184 path
[level
].bp_ctxt
.bh
= *bh
;
2185 ret
= nilfs_btnode_prepare_change_key(
2186 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
2187 &path
[level
].bp_ctxt
);
2190 nilfs_btnode_commit_change_key(
2191 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
2192 &path
[level
].bp_ctxt
);
2193 *bh
= path
[level
].bp_ctxt
.bh
;
2196 nilfs_btree_node_set_ptr(parent
, path
[level
+ 1].bp_index
, blocknr
,
2199 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2200 /* on-disk format */
2201 binfo
->bi_dat
.bi_blkoff
= cpu_to_le64(key
);
2202 binfo
->bi_dat
.bi_level
= level
;
2207 static int nilfs_btree_assign_v(struct nilfs_bmap
*btree
,
2208 struct nilfs_btree_path
*path
,
2210 struct buffer_head
**bh
,
2212 union nilfs_binfo
*binfo
)
2214 struct nilfs_btree_node
*parent
;
2215 struct inode
*dat
= nilfs_bmap_get_dat(btree
);
2218 union nilfs_bmap_ptr_req req
;
2221 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2222 ptr
= nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
2225 ret
= nilfs_dat_prepare_start(dat
, &req
.bpr_req
);
2228 nilfs_dat_commit_start(dat
, &req
.bpr_req
, blocknr
);
2230 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2231 /* on-disk format */
2232 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64(ptr
);
2233 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2238 static int nilfs_btree_assign(struct nilfs_bmap
*btree
,
2239 struct buffer_head
**bh
,
2241 union nilfs_binfo
*binfo
)
2243 struct nilfs_btree_path
*path
;
2244 struct nilfs_btree_node
*node
;
2248 path
= nilfs_btree_alloc_path();
2252 if (buffer_nilfs_node(*bh
)) {
2253 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2254 key
= nilfs_btree_node_get_key(node
, 0);
2255 level
= nilfs_btree_node_get_level(node
);
2257 key
= nilfs_bmap_data_get_key(btree
, *bh
);
2258 level
= NILFS_BTREE_LEVEL_DATA
;
2261 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1, 0);
2263 WARN_ON(ret
== -ENOENT
);
2267 ret
= NILFS_BMAP_USE_VBN(btree
) ?
2268 nilfs_btree_assign_v(btree
, path
, level
, bh
, blocknr
, binfo
) :
2269 nilfs_btree_assign_p(btree
, path
, level
, bh
, blocknr
, binfo
);
2272 nilfs_btree_free_path(path
);
2277 static int nilfs_btree_assign_gc(struct nilfs_bmap
*btree
,
2278 struct buffer_head
**bh
,
2280 union nilfs_binfo
*binfo
)
2282 struct nilfs_btree_node
*node
;
2286 ret
= nilfs_dat_move(nilfs_bmap_get_dat(btree
), (*bh
)->b_blocknr
,
2291 if (buffer_nilfs_node(*bh
)) {
2292 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2293 key
= nilfs_btree_node_get_key(node
, 0);
2295 key
= nilfs_bmap_data_get_key(btree
, *bh
);
2297 /* on-disk format */
2298 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64((*bh
)->b_blocknr
);
2299 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2304 static int nilfs_btree_mark(struct nilfs_bmap
*btree
, __u64 key
, int level
)
2306 struct buffer_head
*bh
;
2307 struct nilfs_btree_path
*path
;
2311 path
= nilfs_btree_alloc_path();
2315 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
+ 1, 0);
2317 WARN_ON(ret
== -ENOENT
);
2320 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
2322 WARN_ON(ret
== -ENOENT
);
2326 if (!buffer_dirty(bh
))
2327 mark_buffer_dirty(bh
);
2329 if (!nilfs_bmap_dirty(btree
))
2330 nilfs_bmap_set_dirty(btree
);
2333 nilfs_btree_free_path(path
);
2337 static const struct nilfs_bmap_operations nilfs_btree_ops
= {
2338 .bop_lookup
= nilfs_btree_lookup
,
2339 .bop_lookup_contig
= nilfs_btree_lookup_contig
,
2340 .bop_insert
= nilfs_btree_insert
,
2341 .bop_delete
= nilfs_btree_delete
,
2344 .bop_propagate
= nilfs_btree_propagate
,
2346 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2348 .bop_assign
= nilfs_btree_assign
,
2349 .bop_mark
= nilfs_btree_mark
,
2351 .bop_seek_key
= nilfs_btree_seek_key
,
2352 .bop_last_key
= nilfs_btree_last_key
,
2354 .bop_check_insert
= NULL
,
2355 .bop_check_delete
= nilfs_btree_check_delete
,
2356 .bop_gather_data
= nilfs_btree_gather_data
,
2359 static const struct nilfs_bmap_operations nilfs_btree_ops_gc
= {
2361 .bop_lookup_contig
= NULL
,
2366 .bop_propagate
= nilfs_btree_propagate_gc
,
2368 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2370 .bop_assign
= nilfs_btree_assign_gc
,
2373 .bop_seek_key
= NULL
,
2374 .bop_last_key
= NULL
,
2376 .bop_check_insert
= NULL
,
2377 .bop_check_delete
= NULL
,
2378 .bop_gather_data
= NULL
,
2381 static void __nilfs_btree_init(struct nilfs_bmap
*bmap
)
2383 bmap
->b_ops
= &nilfs_btree_ops
;
2384 bmap
->b_nchildren_per_block
=
2385 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap
));
2388 int nilfs_btree_init(struct nilfs_bmap
*bmap
)
2392 __nilfs_btree_init(bmap
);
2394 if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap
),
2395 bmap
->b_inode
->i_ino
))
2400 void nilfs_btree_init_gc(struct nilfs_bmap
*bmap
)
2402 bmap
->b_ops
= &nilfs_btree_ops_gc
;
2403 bmap
->b_nchildren_per_block
=
2404 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap
));