2 * btree.c - NILFS B-tree.
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Koji Sato <koji@osrg.net>.
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/errno.h>
26 #include <linux/pagevec.h>
34 static struct nilfs_btree_path
*nilfs_btree_alloc_path(void)
36 struct nilfs_btree_path
*path
;
37 int level
= NILFS_BTREE_LEVEL_DATA
;
39 path
= kmem_cache_alloc(nilfs_btree_path_cache
, GFP_NOFS
);
43 for (; level
< NILFS_BTREE_LEVEL_MAX
; level
++) {
44 path
[level
].bp_bh
= NULL
;
45 path
[level
].bp_sib_bh
= NULL
;
46 path
[level
].bp_index
= 0;
47 path
[level
].bp_oldreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
48 path
[level
].bp_newreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
49 path
[level
].bp_op
= NULL
;
56 static void nilfs_btree_free_path(struct nilfs_btree_path
*path
)
58 int level
= NILFS_BTREE_LEVEL_DATA
;
60 for (; level
< NILFS_BTREE_LEVEL_MAX
; level
++)
61 brelse(path
[level
].bp_bh
);
63 kmem_cache_free(nilfs_btree_path_cache
, path
);
67 * B-tree node operations
69 static int nilfs_btree_get_block(const struct nilfs_btree
*btree
, __u64 ptr
,
70 struct buffer_head
**bhp
)
72 struct address_space
*btnc
=
73 &NILFS_BMAP_I((struct nilfs_bmap
*)btree
)->i_btnode_cache
;
74 struct buffer_head
*bh
;
77 err
= nilfs_btnode_submit_block(btnc
, ptr
, 0, bhp
);
79 return err
== -EEXIST
? 0 : err
;
83 if (!buffer_uptodate(bh
)) {
87 if (nilfs_btree_broken_node_block(bh
)) {
88 clear_buffer_uptodate(bh
);
95 static int nilfs_btree_get_new_block(const struct nilfs_btree
*btree
,
96 __u64 ptr
, struct buffer_head
**bhp
)
98 struct address_space
*btnc
=
99 &NILFS_BMAP_I((struct nilfs_bmap
*)btree
)->i_btnode_cache
;
100 struct buffer_head
*bh
;
102 bh
= nilfs_btnode_create_block(btnc
, ptr
);
106 set_buffer_nilfs_volatile(bh
);
112 nilfs_btree_node_get_flags(const struct nilfs_btree_node
*node
)
114 return node
->bn_flags
;
118 nilfs_btree_node_set_flags(struct nilfs_btree_node
*node
, int flags
)
120 node
->bn_flags
= flags
;
123 static inline int nilfs_btree_node_root(const struct nilfs_btree_node
*node
)
125 return nilfs_btree_node_get_flags(node
) & NILFS_BTREE_NODE_ROOT
;
129 nilfs_btree_node_get_level(const struct nilfs_btree_node
*node
)
131 return node
->bn_level
;
135 nilfs_btree_node_set_level(struct nilfs_btree_node
*node
, int level
)
137 node
->bn_level
= level
;
141 nilfs_btree_node_get_nchildren(const struct nilfs_btree_node
*node
)
143 return le16_to_cpu(node
->bn_nchildren
);
147 nilfs_btree_node_set_nchildren(struct nilfs_btree_node
*node
, int nchildren
)
149 node
->bn_nchildren
= cpu_to_le16(nchildren
);
152 static inline int nilfs_btree_node_size(const struct nilfs_btree
*btree
)
154 return 1 << btree
->bt_bmap
.b_inode
->i_blkbits
;
158 nilfs_btree_node_nchildren_min(const struct nilfs_btree_node
*node
,
159 const struct nilfs_btree
*btree
)
161 return nilfs_btree_node_root(node
) ?
162 NILFS_BTREE_ROOT_NCHILDREN_MIN
:
163 NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree
));
167 nilfs_btree_node_nchildren_max(const struct nilfs_btree_node
*node
,
168 const struct nilfs_btree
*btree
)
170 return nilfs_btree_node_root(node
) ?
171 NILFS_BTREE_ROOT_NCHILDREN_MAX
:
172 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree
));
175 static inline __le64
*
176 nilfs_btree_node_dkeys(const struct nilfs_btree_node
*node
)
178 return (__le64
*)((char *)(node
+ 1) +
179 (nilfs_btree_node_root(node
) ?
180 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE
));
183 static inline __le64
*
184 nilfs_btree_node_dptrs(const struct nilfs_btree_node
*node
,
185 const struct nilfs_btree
*btree
)
187 return (__le64
*)(nilfs_btree_node_dkeys(node
) +
188 nilfs_btree_node_nchildren_max(node
, btree
));
192 nilfs_btree_node_get_key(const struct nilfs_btree_node
*node
, int index
)
194 return le64_to_cpu(*(nilfs_btree_node_dkeys(node
) + index
));
198 nilfs_btree_node_set_key(struct nilfs_btree_node
*node
, int index
, __u64 key
)
200 *(nilfs_btree_node_dkeys(node
) + index
) = cpu_to_le64(key
);
204 nilfs_btree_node_get_ptr(const struct nilfs_btree
*btree
,
205 const struct nilfs_btree_node
*node
, int index
)
207 return le64_to_cpu(*(nilfs_btree_node_dptrs(node
, btree
) + index
));
211 nilfs_btree_node_set_ptr(struct nilfs_btree
*btree
,
212 struct nilfs_btree_node
*node
, int index
, __u64 ptr
)
214 *(nilfs_btree_node_dptrs(node
, btree
) + index
) = cpu_to_le64(ptr
);
217 static void nilfs_btree_node_init(struct nilfs_btree
*btree
,
218 struct nilfs_btree_node
*node
,
219 int flags
, int level
, int nchildren
,
220 const __u64
*keys
, const __u64
*ptrs
)
226 nilfs_btree_node_set_flags(node
, flags
);
227 nilfs_btree_node_set_level(node
, level
);
228 nilfs_btree_node_set_nchildren(node
, nchildren
);
230 dkeys
= nilfs_btree_node_dkeys(node
);
231 dptrs
= nilfs_btree_node_dptrs(node
, btree
);
232 for (i
= 0; i
< nchildren
; i
++) {
233 dkeys
[i
] = cpu_to_le64(keys
[i
]);
234 dptrs
[i
] = cpu_to_le64(ptrs
[i
]);
238 /* Assume the buffer heads corresponding to left and right are locked. */
239 static void nilfs_btree_node_move_left(struct nilfs_btree
*btree
,
240 struct nilfs_btree_node
*left
,
241 struct nilfs_btree_node
*right
,
244 __le64
*ldkeys
, *rdkeys
;
245 __le64
*ldptrs
, *rdptrs
;
246 int lnchildren
, rnchildren
;
248 ldkeys
= nilfs_btree_node_dkeys(left
);
249 ldptrs
= nilfs_btree_node_dptrs(left
, btree
);
250 lnchildren
= nilfs_btree_node_get_nchildren(left
);
252 rdkeys
= nilfs_btree_node_dkeys(right
);
253 rdptrs
= nilfs_btree_node_dptrs(right
, btree
);
254 rnchildren
= nilfs_btree_node_get_nchildren(right
);
256 memcpy(ldkeys
+ lnchildren
, rdkeys
, n
* sizeof(*rdkeys
));
257 memcpy(ldptrs
+ lnchildren
, rdptrs
, n
* sizeof(*rdptrs
));
258 memmove(rdkeys
, rdkeys
+ n
, (rnchildren
- n
) * sizeof(*rdkeys
));
259 memmove(rdptrs
, rdptrs
+ n
, (rnchildren
- n
) * sizeof(*rdptrs
));
263 nilfs_btree_node_set_nchildren(left
, lnchildren
);
264 nilfs_btree_node_set_nchildren(right
, rnchildren
);
267 /* Assume that the buffer heads corresponding to left and right are locked. */
268 static void nilfs_btree_node_move_right(struct nilfs_btree
*btree
,
269 struct nilfs_btree_node
*left
,
270 struct nilfs_btree_node
*right
,
273 __le64
*ldkeys
, *rdkeys
;
274 __le64
*ldptrs
, *rdptrs
;
275 int lnchildren
, rnchildren
;
277 ldkeys
= nilfs_btree_node_dkeys(left
);
278 ldptrs
= nilfs_btree_node_dptrs(left
, btree
);
279 lnchildren
= nilfs_btree_node_get_nchildren(left
);
281 rdkeys
= nilfs_btree_node_dkeys(right
);
282 rdptrs
= nilfs_btree_node_dptrs(right
, btree
);
283 rnchildren
= nilfs_btree_node_get_nchildren(right
);
285 memmove(rdkeys
+ n
, rdkeys
, rnchildren
* sizeof(*rdkeys
));
286 memmove(rdptrs
+ n
, rdptrs
, rnchildren
* sizeof(*rdptrs
));
287 memcpy(rdkeys
, ldkeys
+ lnchildren
- n
, n
* sizeof(*rdkeys
));
288 memcpy(rdptrs
, ldptrs
+ lnchildren
- n
, n
* sizeof(*rdptrs
));
292 nilfs_btree_node_set_nchildren(left
, lnchildren
);
293 nilfs_btree_node_set_nchildren(right
, rnchildren
);
296 /* Assume that the buffer head corresponding to node is locked. */
297 static void nilfs_btree_node_insert(struct nilfs_btree
*btree
,
298 struct nilfs_btree_node
*node
,
299 __u64 key
, __u64 ptr
, int index
)
305 dkeys
= nilfs_btree_node_dkeys(node
);
306 dptrs
= nilfs_btree_node_dptrs(node
, btree
);
307 nchildren
= nilfs_btree_node_get_nchildren(node
);
308 if (index
< nchildren
) {
309 memmove(dkeys
+ index
+ 1, dkeys
+ index
,
310 (nchildren
- index
) * sizeof(*dkeys
));
311 memmove(dptrs
+ index
+ 1, dptrs
+ index
,
312 (nchildren
- index
) * sizeof(*dptrs
));
314 dkeys
[index
] = cpu_to_le64(key
);
315 dptrs
[index
] = cpu_to_le64(ptr
);
317 nilfs_btree_node_set_nchildren(node
, nchildren
);
320 /* Assume that the buffer head corresponding to node is locked. */
321 static void nilfs_btree_node_delete(struct nilfs_btree
*btree
,
322 struct nilfs_btree_node
*node
,
323 __u64
*keyp
, __u64
*ptrp
, int index
)
331 dkeys
= nilfs_btree_node_dkeys(node
);
332 dptrs
= nilfs_btree_node_dptrs(node
, btree
);
333 key
= le64_to_cpu(dkeys
[index
]);
334 ptr
= le64_to_cpu(dptrs
[index
]);
335 nchildren
= nilfs_btree_node_get_nchildren(node
);
341 if (index
< nchildren
- 1) {
342 memmove(dkeys
+ index
, dkeys
+ index
+ 1,
343 (nchildren
- index
- 1) * sizeof(*dkeys
));
344 memmove(dptrs
+ index
, dptrs
+ index
+ 1,
345 (nchildren
- index
- 1) * sizeof(*dptrs
));
348 nilfs_btree_node_set_nchildren(node
, nchildren
);
351 static int nilfs_btree_node_lookup(const struct nilfs_btree_node
*node
,
352 __u64 key
, int *indexp
)
355 int index
, low
, high
, s
;
359 high
= nilfs_btree_node_get_nchildren(node
) - 1;
362 while (low
<= high
) {
363 index
= (low
+ high
) / 2;
364 nkey
= nilfs_btree_node_get_key(node
, index
);
368 } else if (nkey
< key
) {
378 if (nilfs_btree_node_get_level(node
) > NILFS_BTREE_LEVEL_NODE_MIN
) {
379 if (s
> 0 && index
> 0)
391 * nilfs_btree_node_broken - verify consistency of btree node
392 * @node: btree node block to be examined
393 * @size: node size (in bytes)
394 * @blocknr: block number
396 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
398 static int nilfs_btree_node_broken(const struct nilfs_btree_node
*node
,
399 size_t size
, sector_t blocknr
)
401 int level
, flags
, nchildren
;
404 level
= nilfs_btree_node_get_level(node
);
405 flags
= nilfs_btree_node_get_flags(node
);
406 nchildren
= nilfs_btree_node_get_nchildren(node
);
408 if (unlikely(level
< NILFS_BTREE_LEVEL_NODE_MIN
||
409 level
>= NILFS_BTREE_LEVEL_MAX
||
410 (flags
& NILFS_BTREE_NODE_ROOT
) ||
412 nchildren
> NILFS_BTREE_NODE_NCHILDREN_MAX(size
))) {
413 printk(KERN_CRIT
"NILFS: bad btree node (blocknr=%llu): "
414 "level = %d, flags = 0x%x, nchildren = %d\n",
415 (unsigned long long)blocknr
, level
, flags
, nchildren
);
421 int nilfs_btree_broken_node_block(struct buffer_head
*bh
)
423 return nilfs_btree_node_broken((struct nilfs_btree_node
*)bh
->b_data
,
424 bh
->b_size
, bh
->b_blocknr
);
427 static inline struct nilfs_btree_node
*
428 nilfs_btree_get_root(const struct nilfs_btree
*btree
)
430 return (struct nilfs_btree_node
*)btree
->bt_bmap
.b_u
.u_data
;
433 static inline struct nilfs_btree_node
*
434 nilfs_btree_get_nonroot_node(const struct nilfs_btree_path
*path
, int level
)
436 return (struct nilfs_btree_node
*)path
[level
].bp_bh
->b_data
;
439 static inline struct nilfs_btree_node
*
440 nilfs_btree_get_sib_node(const struct nilfs_btree_path
*path
, int level
)
442 return (struct nilfs_btree_node
*)path
[level
].bp_sib_bh
->b_data
;
/* Tree height = root's level + 1 (data blocks count as one level). */
static inline int nilfs_btree_height(const struct nilfs_btree *btree)
{
	struct nilfs_btree_node *root = nilfs_btree_get_root(btree);

	return nilfs_btree_node_get_level(root) + 1;
}
/*
 * Node at @level along @path: the topmost level is the inline root,
 * everything below comes from the path's cached buffer heads.
 */
static inline struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_btree *btree,
		     const struct nilfs_btree_path *path,
		     int level)
{
	if (level == nilfs_btree_height(btree) - 1)
		return nilfs_btree_get_root(btree);

	return nilfs_btree_get_nonroot_node(path, level);
}
461 nilfs_btree_bad_node(struct nilfs_btree_node
*node
, int level
)
463 if (unlikely(nilfs_btree_node_get_level(node
) != level
)) {
465 printk(KERN_CRIT
"NILFS: btree level mismatch: %d != %d\n",
466 nilfs_btree_node_get_level(node
), level
);
472 static int nilfs_btree_do_lookup(const struct nilfs_btree
*btree
,
473 struct nilfs_btree_path
*path
,
474 __u64 key
, __u64
*ptrp
, int minlevel
)
476 struct nilfs_btree_node
*node
;
478 int level
, index
, found
, ret
;
480 node
= nilfs_btree_get_root(btree
);
481 level
= nilfs_btree_node_get_level(node
);
482 if (level
< minlevel
|| nilfs_btree_node_get_nchildren(node
) <= 0)
485 found
= nilfs_btree_node_lookup(node
, key
, &index
);
486 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
487 path
[level
].bp_bh
= NULL
;
488 path
[level
].bp_index
= index
;
490 for (level
--; level
>= minlevel
; level
--) {
491 ret
= nilfs_btree_get_block(btree
, ptr
, &path
[level
].bp_bh
);
494 node
= nilfs_btree_get_nonroot_node(path
, level
);
495 if (nilfs_btree_bad_node(node
, level
))
498 found
= nilfs_btree_node_lookup(node
, key
, &index
);
501 if (index
< nilfs_btree_node_nchildren_max(node
, btree
))
502 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
504 WARN_ON(found
|| level
!= NILFS_BTREE_LEVEL_NODE_MIN
);
506 ptr
= NILFS_BMAP_INVALID_PTR
;
508 path
[level
].bp_index
= index
;
519 static int nilfs_btree_do_lookup_last(const struct nilfs_btree
*btree
,
520 struct nilfs_btree_path
*path
,
521 __u64
*keyp
, __u64
*ptrp
)
523 struct nilfs_btree_node
*node
;
525 int index
, level
, ret
;
527 node
= nilfs_btree_get_root(btree
);
528 index
= nilfs_btree_node_get_nchildren(node
) - 1;
531 level
= nilfs_btree_node_get_level(node
);
532 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
533 path
[level
].bp_bh
= NULL
;
534 path
[level
].bp_index
= index
;
536 for (level
--; level
> 0; level
--) {
537 ret
= nilfs_btree_get_block(btree
, ptr
, &path
[level
].bp_bh
);
540 node
= nilfs_btree_get_nonroot_node(path
, level
);
541 if (nilfs_btree_bad_node(node
, level
))
543 index
= nilfs_btree_node_get_nchildren(node
) - 1;
544 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
545 path
[level
].bp_index
= index
;
549 *keyp
= nilfs_btree_node_get_key(node
, index
);
556 static int nilfs_btree_lookup(const struct nilfs_bmap
*bmap
,
557 __u64 key
, int level
, __u64
*ptrp
)
559 struct nilfs_btree
*btree
;
560 struct nilfs_btree_path
*path
;
564 btree
= (struct nilfs_btree
*)bmap
;
565 path
= nilfs_btree_alloc_path();
569 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
);
574 nilfs_btree_free_path(path
);
579 static int nilfs_btree_lookup_contig(const struct nilfs_bmap
*bmap
,
580 __u64 key
, __u64
*ptrp
, unsigned maxblocks
)
582 struct nilfs_btree
*btree
= (struct nilfs_btree
*)bmap
;
583 struct nilfs_btree_path
*path
;
584 struct nilfs_btree_node
*node
;
585 struct inode
*dat
= NULL
;
588 int level
= NILFS_BTREE_LEVEL_NODE_MIN
;
589 int ret
, cnt
, index
, maxlevel
;
591 path
= nilfs_btree_alloc_path();
595 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
);
599 if (NILFS_BMAP_USE_VBN(bmap
)) {
600 dat
= nilfs_bmap_get_dat(bmap
);
601 ret
= nilfs_dat_translate(dat
, ptr
, &blocknr
);
607 if (cnt
== maxblocks
)
610 maxlevel
= nilfs_btree_height(btree
) - 1;
611 node
= nilfs_btree_get_node(btree
, path
, level
);
612 index
= path
[level
].bp_index
+ 1;
614 while (index
< nilfs_btree_node_get_nchildren(node
)) {
615 if (nilfs_btree_node_get_key(node
, index
) !=
618 ptr2
= nilfs_btree_node_get_ptr(btree
, node
, index
);
620 ret
= nilfs_dat_translate(dat
, ptr2
, &blocknr
);
625 if (ptr2
!= ptr
+ cnt
|| ++cnt
== maxblocks
)
630 if (level
== maxlevel
)
633 /* look-up right sibling node */
634 node
= nilfs_btree_get_node(btree
, path
, level
+ 1);
635 index
= path
[level
+ 1].bp_index
+ 1;
636 if (index
>= nilfs_btree_node_get_nchildren(node
) ||
637 nilfs_btree_node_get_key(node
, index
) != key
+ cnt
)
639 ptr2
= nilfs_btree_node_get_ptr(btree
, node
, index
);
640 path
[level
+ 1].bp_index
= index
;
642 brelse(path
[level
].bp_bh
);
643 path
[level
].bp_bh
= NULL
;
644 ret
= nilfs_btree_get_block(btree
, ptr2
, &path
[level
].bp_bh
);
647 node
= nilfs_btree_get_nonroot_node(path
, level
);
649 path
[level
].bp_index
= index
;
655 nilfs_btree_free_path(path
);
659 static void nilfs_btree_promote_key(struct nilfs_btree
*btree
,
660 struct nilfs_btree_path
*path
,
661 int level
, __u64 key
)
663 if (level
< nilfs_btree_height(btree
) - 1) {
665 nilfs_btree_node_set_key(
666 nilfs_btree_get_nonroot_node(path
, level
),
667 path
[level
].bp_index
, key
);
668 if (!buffer_dirty(path
[level
].bp_bh
))
669 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
670 } while ((path
[level
].bp_index
== 0) &&
671 (++level
< nilfs_btree_height(btree
) - 1));
675 if (level
== nilfs_btree_height(btree
) - 1) {
676 nilfs_btree_node_set_key(nilfs_btree_get_root(btree
),
677 path
[level
].bp_index
, key
);
681 static void nilfs_btree_do_insert(struct nilfs_btree
*btree
,
682 struct nilfs_btree_path
*path
,
683 int level
, __u64
*keyp
, __u64
*ptrp
)
685 struct nilfs_btree_node
*node
;
687 if (level
< nilfs_btree_height(btree
) - 1) {
688 node
= nilfs_btree_get_nonroot_node(path
, level
);
689 nilfs_btree_node_insert(btree
, node
, *keyp
, *ptrp
,
690 path
[level
].bp_index
);
691 if (!buffer_dirty(path
[level
].bp_bh
))
692 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
694 if (path
[level
].bp_index
== 0)
695 nilfs_btree_promote_key(btree
, path
, level
+ 1,
696 nilfs_btree_node_get_key(node
,
699 node
= nilfs_btree_get_root(btree
);
700 nilfs_btree_node_insert(btree
, node
, *keyp
, *ptrp
,
701 path
[level
].bp_index
);
705 static void nilfs_btree_carry_left(struct nilfs_btree
*btree
,
706 struct nilfs_btree_path
*path
,
707 int level
, __u64
*keyp
, __u64
*ptrp
)
709 struct nilfs_btree_node
*node
, *left
;
710 int nchildren
, lnchildren
, n
, move
;
712 node
= nilfs_btree_get_nonroot_node(path
, level
);
713 left
= nilfs_btree_get_sib_node(path
, level
);
714 nchildren
= nilfs_btree_node_get_nchildren(node
);
715 lnchildren
= nilfs_btree_node_get_nchildren(left
);
718 n
= (nchildren
+ lnchildren
+ 1) / 2 - lnchildren
;
719 if (n
> path
[level
].bp_index
) {
720 /* move insert point */
725 nilfs_btree_node_move_left(btree
, left
, node
, n
);
727 if (!buffer_dirty(path
[level
].bp_bh
))
728 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
729 if (!buffer_dirty(path
[level
].bp_sib_bh
))
730 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
732 nilfs_btree_promote_key(btree
, path
, level
+ 1,
733 nilfs_btree_node_get_key(node
, 0));
736 brelse(path
[level
].bp_bh
);
737 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
738 path
[level
].bp_sib_bh
= NULL
;
739 path
[level
].bp_index
+= lnchildren
;
740 path
[level
+ 1].bp_index
--;
742 brelse(path
[level
].bp_sib_bh
);
743 path
[level
].bp_sib_bh
= NULL
;
744 path
[level
].bp_index
-= n
;
747 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
750 static void nilfs_btree_carry_right(struct nilfs_btree
*btree
,
751 struct nilfs_btree_path
*path
,
752 int level
, __u64
*keyp
, __u64
*ptrp
)
754 struct nilfs_btree_node
*node
, *right
;
755 int nchildren
, rnchildren
, n
, move
;
757 node
= nilfs_btree_get_nonroot_node(path
, level
);
758 right
= nilfs_btree_get_sib_node(path
, level
);
759 nchildren
= nilfs_btree_node_get_nchildren(node
);
760 rnchildren
= nilfs_btree_node_get_nchildren(right
);
763 n
= (nchildren
+ rnchildren
+ 1) / 2 - rnchildren
;
764 if (n
> nchildren
- path
[level
].bp_index
) {
765 /* move insert point */
770 nilfs_btree_node_move_right(btree
, node
, right
, n
);
772 if (!buffer_dirty(path
[level
].bp_bh
))
773 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
774 if (!buffer_dirty(path
[level
].bp_sib_bh
))
775 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
777 path
[level
+ 1].bp_index
++;
778 nilfs_btree_promote_key(btree
, path
, level
+ 1,
779 nilfs_btree_node_get_key(right
, 0));
780 path
[level
+ 1].bp_index
--;
783 brelse(path
[level
].bp_bh
);
784 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
785 path
[level
].bp_sib_bh
= NULL
;
786 path
[level
].bp_index
-= nilfs_btree_node_get_nchildren(node
);
787 path
[level
+ 1].bp_index
++;
789 brelse(path
[level
].bp_sib_bh
);
790 path
[level
].bp_sib_bh
= NULL
;
793 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
796 static void nilfs_btree_split(struct nilfs_btree
*btree
,
797 struct nilfs_btree_path
*path
,
798 int level
, __u64
*keyp
, __u64
*ptrp
)
800 struct nilfs_btree_node
*node
, *right
;
803 int nchildren
, n
, move
;
805 node
= nilfs_btree_get_nonroot_node(path
, level
);
806 right
= nilfs_btree_get_sib_node(path
, level
);
807 nchildren
= nilfs_btree_node_get_nchildren(node
);
810 n
= (nchildren
+ 1) / 2;
811 if (n
> nchildren
- path
[level
].bp_index
) {
816 nilfs_btree_node_move_right(btree
, node
, right
, n
);
818 if (!buffer_dirty(path
[level
].bp_bh
))
819 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
820 if (!buffer_dirty(path
[level
].bp_sib_bh
))
821 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
823 newkey
= nilfs_btree_node_get_key(right
, 0);
824 newptr
= path
[level
].bp_newreq
.bpr_ptr
;
827 path
[level
].bp_index
-= nilfs_btree_node_get_nchildren(node
);
828 nilfs_btree_node_insert(btree
, right
, *keyp
, *ptrp
,
829 path
[level
].bp_index
);
831 *keyp
= nilfs_btree_node_get_key(right
, 0);
832 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
834 brelse(path
[level
].bp_bh
);
835 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
836 path
[level
].bp_sib_bh
= NULL
;
838 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
840 *keyp
= nilfs_btree_node_get_key(right
, 0);
841 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
843 brelse(path
[level
].bp_sib_bh
);
844 path
[level
].bp_sib_bh
= NULL
;
847 path
[level
+ 1].bp_index
++;
850 static void nilfs_btree_grow(struct nilfs_btree
*btree
,
851 struct nilfs_btree_path
*path
,
852 int level
, __u64
*keyp
, __u64
*ptrp
)
854 struct nilfs_btree_node
*root
, *child
;
857 root
= nilfs_btree_get_root(btree
);
858 child
= nilfs_btree_get_sib_node(path
, level
);
860 n
= nilfs_btree_node_get_nchildren(root
);
862 nilfs_btree_node_move_right(btree
, root
, child
, n
);
863 nilfs_btree_node_set_level(root
, level
+ 1);
865 if (!buffer_dirty(path
[level
].bp_sib_bh
))
866 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
868 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
869 path
[level
].bp_sib_bh
= NULL
;
871 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
873 *keyp
= nilfs_btree_node_get_key(child
, 0);
874 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
877 static __u64
nilfs_btree_find_near(const struct nilfs_btree
*btree
,
878 const struct nilfs_btree_path
*path
)
880 struct nilfs_btree_node
*node
;
884 return NILFS_BMAP_INVALID_PTR
;
887 level
= NILFS_BTREE_LEVEL_NODE_MIN
;
888 if (path
[level
].bp_index
> 0) {
889 node
= nilfs_btree_get_node(btree
, path
, level
);
890 return nilfs_btree_node_get_ptr(btree
, node
,
891 path
[level
].bp_index
- 1);
895 level
= NILFS_BTREE_LEVEL_NODE_MIN
+ 1;
896 if (level
<= nilfs_btree_height(btree
) - 1) {
897 node
= nilfs_btree_get_node(btree
, path
, level
);
898 return nilfs_btree_node_get_ptr(btree
, node
,
899 path
[level
].bp_index
);
902 return NILFS_BMAP_INVALID_PTR
;
905 static __u64
nilfs_btree_find_target_v(const struct nilfs_btree
*btree
,
906 const struct nilfs_btree_path
*path
,
911 ptr
= nilfs_bmap_find_target_seq(&btree
->bt_bmap
, key
);
912 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
913 /* sequential access */
916 ptr
= nilfs_btree_find_near(btree
, path
);
917 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
922 return nilfs_bmap_find_target_in_group(&btree
->bt_bmap
);
925 static void nilfs_btree_set_target_v(struct nilfs_btree
*btree
, __u64 key
,
928 btree
->bt_bmap
.b_last_allocated_key
= key
;
929 btree
->bt_bmap
.b_last_allocated_ptr
= ptr
;
932 static int nilfs_btree_prepare_insert(struct nilfs_btree
*btree
,
933 struct nilfs_btree_path
*path
,
934 int *levelp
, __u64 key
, __u64 ptr
,
935 struct nilfs_bmap_stats
*stats
)
937 struct buffer_head
*bh
;
938 struct nilfs_btree_node
*node
, *parent
, *sib
;
940 int pindex
, level
, ret
;
941 struct inode
*dat
= NULL
;
943 stats
->bs_nblocks
= 0;
944 level
= NILFS_BTREE_LEVEL_DATA
;
946 /* allocate a new ptr for data block */
947 if (NILFS_BMAP_USE_VBN(&btree
->bt_bmap
)) {
948 path
[level
].bp_newreq
.bpr_ptr
=
949 nilfs_btree_find_target_v(btree
, path
, key
);
950 dat
= nilfs_bmap_get_dat(&btree
->bt_bmap
);
953 ret
= nilfs_bmap_prepare_alloc_ptr(&btree
->bt_bmap
,
954 &path
[level
].bp_newreq
, dat
);
958 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
959 level
< nilfs_btree_height(btree
) - 1;
961 node
= nilfs_btree_get_nonroot_node(path
, level
);
962 if (nilfs_btree_node_get_nchildren(node
) <
963 nilfs_btree_node_nchildren_max(node
, btree
)) {
964 path
[level
].bp_op
= nilfs_btree_do_insert
;
969 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
970 pindex
= path
[level
+ 1].bp_index
;
974 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
976 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
978 goto err_out_child_node
;
979 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
980 if (nilfs_btree_node_get_nchildren(sib
) <
981 nilfs_btree_node_nchildren_max(sib
, btree
)) {
982 path
[level
].bp_sib_bh
= bh
;
983 path
[level
].bp_op
= nilfs_btree_carry_left
;
992 nilfs_btree_node_get_nchildren(parent
) - 1) {
993 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
995 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
997 goto err_out_child_node
;
998 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
999 if (nilfs_btree_node_get_nchildren(sib
) <
1000 nilfs_btree_node_nchildren_max(sib
, btree
)) {
1001 path
[level
].bp_sib_bh
= bh
;
1002 path
[level
].bp_op
= nilfs_btree_carry_right
;
1003 stats
->bs_nblocks
++;
1010 path
[level
].bp_newreq
.bpr_ptr
=
1011 path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
1012 ret
= nilfs_bmap_prepare_alloc_ptr(&btree
->bt_bmap
,
1013 &path
[level
].bp_newreq
, dat
);
1015 goto err_out_child_node
;
1016 ret
= nilfs_btree_get_new_block(btree
,
1017 path
[level
].bp_newreq
.bpr_ptr
,
1020 goto err_out_curr_node
;
1022 stats
->bs_nblocks
++;
1024 nilfs_btree_node_init(btree
,
1025 (struct nilfs_btree_node
*)bh
->b_data
,
1026 0, level
, 0, NULL
, NULL
);
1027 path
[level
].bp_sib_bh
= bh
;
1028 path
[level
].bp_op
= nilfs_btree_split
;
1032 node
= nilfs_btree_get_root(btree
);
1033 if (nilfs_btree_node_get_nchildren(node
) <
1034 nilfs_btree_node_nchildren_max(node
, btree
)) {
1035 path
[level
].bp_op
= nilfs_btree_do_insert
;
1036 stats
->bs_nblocks
++;
1041 path
[level
].bp_newreq
.bpr_ptr
= path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
1042 ret
= nilfs_bmap_prepare_alloc_ptr(&btree
->bt_bmap
,
1043 &path
[level
].bp_newreq
, dat
);
1045 goto err_out_child_node
;
1046 ret
= nilfs_btree_get_new_block(btree
, path
[level
].bp_newreq
.bpr_ptr
,
1049 goto err_out_curr_node
;
1051 nilfs_btree_node_init(btree
, (struct nilfs_btree_node
*)bh
->b_data
,
1052 0, level
, 0, NULL
, NULL
);
1053 path
[level
].bp_sib_bh
= bh
;
1054 path
[level
].bp_op
= nilfs_btree_grow
;
1057 path
[level
].bp_op
= nilfs_btree_do_insert
;
1059 /* a newly-created node block and a data block are added */
1060 stats
->bs_nblocks
+= 2;
1069 nilfs_bmap_abort_alloc_ptr(&btree
->bt_bmap
, &path
[level
].bp_newreq
,
1072 for (level
--; level
> NILFS_BTREE_LEVEL_DATA
; level
--) {
1073 nilfs_btnode_delete(path
[level
].bp_sib_bh
);
1074 nilfs_bmap_abort_alloc_ptr(&btree
->bt_bmap
,
1075 &path
[level
].bp_newreq
, dat
);
1079 nilfs_bmap_abort_alloc_ptr(&btree
->bt_bmap
, &path
[level
].bp_newreq
,
1083 stats
->bs_nblocks
= 0;
1087 static void nilfs_btree_commit_insert(struct nilfs_btree
*btree
,
1088 struct nilfs_btree_path
*path
,
1089 int maxlevel
, __u64 key
, __u64 ptr
)
1091 struct inode
*dat
= NULL
;
1094 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1095 ptr
= path
[NILFS_BTREE_LEVEL_DATA
].bp_newreq
.bpr_ptr
;
1096 if (NILFS_BMAP_USE_VBN(&btree
->bt_bmap
)) {
1097 nilfs_btree_set_target_v(btree
, key
, ptr
);
1098 dat
= nilfs_bmap_get_dat(&btree
->bt_bmap
);
1101 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1102 nilfs_bmap_commit_alloc_ptr(&btree
->bt_bmap
,
1103 &path
[level
- 1].bp_newreq
, dat
);
1104 path
[level
].bp_op(btree
, path
, level
, &key
, &ptr
);
1107 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
1108 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
1111 static int nilfs_btree_insert(struct nilfs_bmap
*bmap
, __u64 key
, __u64 ptr
)
1113 struct nilfs_btree
*btree
;
1114 struct nilfs_btree_path
*path
;
1115 struct nilfs_bmap_stats stats
;
1118 btree
= (struct nilfs_btree
*)bmap
;
1119 path
= nilfs_btree_alloc_path();
1123 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1124 NILFS_BTREE_LEVEL_NODE_MIN
);
1125 if (ret
!= -ENOENT
) {
1131 ret
= nilfs_btree_prepare_insert(btree
, path
, &level
, key
, ptr
, &stats
);
1134 nilfs_btree_commit_insert(btree
, path
, level
, key
, ptr
);
1135 nilfs_bmap_add_blocks(bmap
, stats
.bs_nblocks
);
1138 nilfs_btree_free_path(path
);
1142 static void nilfs_btree_do_delete(struct nilfs_btree
*btree
,
1143 struct nilfs_btree_path
*path
,
1144 int level
, __u64
*keyp
, __u64
*ptrp
)
1146 struct nilfs_btree_node
*node
;
1148 if (level
< nilfs_btree_height(btree
) - 1) {
1149 node
= nilfs_btree_get_nonroot_node(path
, level
);
1150 nilfs_btree_node_delete(btree
, node
, keyp
, ptrp
,
1151 path
[level
].bp_index
);
1152 if (!buffer_dirty(path
[level
].bp_bh
))
1153 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1154 if (path
[level
].bp_index
== 0)
1155 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1156 nilfs_btree_node_get_key(node
, 0));
1158 node
= nilfs_btree_get_root(btree
);
1159 nilfs_btree_node_delete(btree
, node
, keyp
, ptrp
,
1160 path
[level
].bp_index
);
1164 static void nilfs_btree_borrow_left(struct nilfs_btree
*btree
,
1165 struct nilfs_btree_path
*path
,
1166 int level
, __u64
*keyp
, __u64
*ptrp
)
1168 struct nilfs_btree_node
*node
, *left
;
1169 int nchildren
, lnchildren
, n
;
1171 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1173 node
= nilfs_btree_get_nonroot_node(path
, level
);
1174 left
= nilfs_btree_get_sib_node(path
, level
);
1175 nchildren
= nilfs_btree_node_get_nchildren(node
);
1176 lnchildren
= nilfs_btree_node_get_nchildren(left
);
1178 n
= (nchildren
+ lnchildren
) / 2 - nchildren
;
1180 nilfs_btree_node_move_right(btree
, left
, node
, n
);
1182 if (!buffer_dirty(path
[level
].bp_bh
))
1183 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1184 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1185 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1187 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1188 nilfs_btree_node_get_key(node
, 0));
1190 brelse(path
[level
].bp_sib_bh
);
1191 path
[level
].bp_sib_bh
= NULL
;
1192 path
[level
].bp_index
+= n
;
1195 static void nilfs_btree_borrow_right(struct nilfs_btree
*btree
,
1196 struct nilfs_btree_path
*path
,
1197 int level
, __u64
*keyp
, __u64
*ptrp
)
1199 struct nilfs_btree_node
*node
, *right
;
1200 int nchildren
, rnchildren
, n
;
1202 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1204 node
= nilfs_btree_get_nonroot_node(path
, level
);
1205 right
= nilfs_btree_get_sib_node(path
, level
);
1206 nchildren
= nilfs_btree_node_get_nchildren(node
);
1207 rnchildren
= nilfs_btree_node_get_nchildren(right
);
1209 n
= (nchildren
+ rnchildren
) / 2 - nchildren
;
1211 nilfs_btree_node_move_left(btree
, node
, right
, n
);
1213 if (!buffer_dirty(path
[level
].bp_bh
))
1214 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1215 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1216 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1218 path
[level
+ 1].bp_index
++;
1219 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1220 nilfs_btree_node_get_key(right
, 0));
1221 path
[level
+ 1].bp_index
--;
1223 brelse(path
[level
].bp_sib_bh
);
1224 path
[level
].bp_sib_bh
= NULL
;
1227 static void nilfs_btree_concat_left(struct nilfs_btree
*btree
,
1228 struct nilfs_btree_path
*path
,
1229 int level
, __u64
*keyp
, __u64
*ptrp
)
1231 struct nilfs_btree_node
*node
, *left
;
1234 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1236 node
= nilfs_btree_get_nonroot_node(path
, level
);
1237 left
= nilfs_btree_get_sib_node(path
, level
);
1239 n
= nilfs_btree_node_get_nchildren(node
);
1241 nilfs_btree_node_move_left(btree
, left
, node
, n
);
1243 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1244 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1246 nilfs_btnode_delete(path
[level
].bp_bh
);
1247 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
1248 path
[level
].bp_sib_bh
= NULL
;
1249 path
[level
].bp_index
+= nilfs_btree_node_get_nchildren(left
);
1252 static void nilfs_btree_concat_right(struct nilfs_btree
*btree
,
1253 struct nilfs_btree_path
*path
,
1254 int level
, __u64
*keyp
, __u64
*ptrp
)
1256 struct nilfs_btree_node
*node
, *right
;
1259 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1261 node
= nilfs_btree_get_nonroot_node(path
, level
);
1262 right
= nilfs_btree_get_sib_node(path
, level
);
1264 n
= nilfs_btree_node_get_nchildren(right
);
1266 nilfs_btree_node_move_left(btree
, node
, right
, n
);
1268 if (!buffer_dirty(path
[level
].bp_bh
))
1269 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1271 nilfs_btnode_delete(path
[level
].bp_sib_bh
);
1272 path
[level
].bp_sib_bh
= NULL
;
1273 path
[level
+ 1].bp_index
++;
1276 static void nilfs_btree_shrink(struct nilfs_btree
*btree
,
1277 struct nilfs_btree_path
*path
,
1278 int level
, __u64
*keyp
, __u64
*ptrp
)
1280 struct nilfs_btree_node
*root
, *child
;
1283 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1285 root
= nilfs_btree_get_root(btree
);
1286 child
= nilfs_btree_get_nonroot_node(path
, level
);
1288 nilfs_btree_node_delete(btree
, root
, NULL
, NULL
, 0);
1289 nilfs_btree_node_set_level(root
, level
);
1290 n
= nilfs_btree_node_get_nchildren(child
);
1291 nilfs_btree_node_move_left(btree
, root
, child
, n
);
1293 nilfs_btnode_delete(path
[level
].bp_bh
);
1294 path
[level
].bp_bh
= NULL
;
1298 static int nilfs_btree_prepare_delete(struct nilfs_btree
*btree
,
1299 struct nilfs_btree_path
*path
,
1301 struct nilfs_bmap_stats
*stats
,
1304 struct buffer_head
*bh
;
1305 struct nilfs_btree_node
*node
, *parent
, *sib
;
1307 int pindex
, level
, ret
;
1310 stats
->bs_nblocks
= 0;
1311 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1312 level
< nilfs_btree_height(btree
) - 1;
1314 node
= nilfs_btree_get_nonroot_node(path
, level
);
1315 path
[level
].bp_oldreq
.bpr_ptr
=
1316 nilfs_btree_node_get_ptr(btree
, node
,
1317 path
[level
].bp_index
);
1318 ret
= nilfs_bmap_prepare_end_ptr(&btree
->bt_bmap
,
1319 &path
[level
].bp_oldreq
, dat
);
1321 goto err_out_child_node
;
1323 if (nilfs_btree_node_get_nchildren(node
) >
1324 nilfs_btree_node_nchildren_min(node
, btree
)) {
1325 path
[level
].bp_op
= nilfs_btree_do_delete
;
1326 stats
->bs_nblocks
++;
1330 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1331 pindex
= path
[level
+ 1].bp_index
;
1335 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1337 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1339 goto err_out_curr_node
;
1340 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1341 if (nilfs_btree_node_get_nchildren(sib
) >
1342 nilfs_btree_node_nchildren_min(sib
, btree
)) {
1343 path
[level
].bp_sib_bh
= bh
;
1344 path
[level
].bp_op
= nilfs_btree_borrow_left
;
1345 stats
->bs_nblocks
++;
1348 path
[level
].bp_sib_bh
= bh
;
1349 path
[level
].bp_op
= nilfs_btree_concat_left
;
1350 stats
->bs_nblocks
++;
1354 nilfs_btree_node_get_nchildren(parent
) - 1) {
1356 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1358 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1360 goto err_out_curr_node
;
1361 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1362 if (nilfs_btree_node_get_nchildren(sib
) >
1363 nilfs_btree_node_nchildren_min(sib
, btree
)) {
1364 path
[level
].bp_sib_bh
= bh
;
1365 path
[level
].bp_op
= nilfs_btree_borrow_right
;
1366 stats
->bs_nblocks
++;
1369 path
[level
].bp_sib_bh
= bh
;
1370 path
[level
].bp_op
= nilfs_btree_concat_right
;
1371 stats
->bs_nblocks
++;
1376 /* the only child of the root node */
1377 WARN_ON(level
!= nilfs_btree_height(btree
) - 2);
1378 if (nilfs_btree_node_get_nchildren(node
) - 1 <=
1379 NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1380 path
[level
].bp_op
= nilfs_btree_shrink
;
1381 stats
->bs_nblocks
+= 2;
1383 path
[level
].bp_op
= nilfs_btree_do_delete
;
1384 stats
->bs_nblocks
++;
1392 node
= nilfs_btree_get_root(btree
);
1393 path
[level
].bp_oldreq
.bpr_ptr
=
1394 nilfs_btree_node_get_ptr(btree
, node
, path
[level
].bp_index
);
1396 ret
= nilfs_bmap_prepare_end_ptr(&btree
->bt_bmap
,
1397 &path
[level
].bp_oldreq
, dat
);
1399 goto err_out_child_node
;
1401 /* child of the root node is deleted */
1402 path
[level
].bp_op
= nilfs_btree_do_delete
;
1403 stats
->bs_nblocks
++;
1412 nilfs_bmap_abort_end_ptr(&btree
->bt_bmap
, &path
[level
].bp_oldreq
, dat
);
1414 for (level
--; level
>= NILFS_BTREE_LEVEL_NODE_MIN
; level
--) {
1415 brelse(path
[level
].bp_sib_bh
);
1416 nilfs_bmap_abort_end_ptr(&btree
->bt_bmap
,
1417 &path
[level
].bp_oldreq
, dat
);
1420 stats
->bs_nblocks
= 0;
1424 static void nilfs_btree_commit_delete(struct nilfs_btree
*btree
,
1425 struct nilfs_btree_path
*path
,
1426 int maxlevel
, struct inode
*dat
)
1430 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1431 nilfs_bmap_commit_end_ptr(&btree
->bt_bmap
,
1432 &path
[level
].bp_oldreq
, dat
);
1433 path
[level
].bp_op(btree
, path
, level
, NULL
, NULL
);
1436 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
1437 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
1440 static int nilfs_btree_delete(struct nilfs_bmap
*bmap
, __u64 key
)
1443 struct nilfs_btree
*btree
;
1444 struct nilfs_btree_path
*path
;
1445 struct nilfs_bmap_stats stats
;
1449 btree
= (struct nilfs_btree
*)bmap
;
1450 path
= nilfs_btree_alloc_path();
1454 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1455 NILFS_BTREE_LEVEL_NODE_MIN
);
1460 dat
= NILFS_BMAP_USE_VBN(&btree
->bt_bmap
) ?
1461 nilfs_bmap_get_dat(&btree
->bt_bmap
) : NULL
;
1463 ret
= nilfs_btree_prepare_delete(btree
, path
, &level
, &stats
, dat
);
1466 nilfs_btree_commit_delete(btree
, path
, level
, dat
);
1467 nilfs_bmap_sub_blocks(bmap
, stats
.bs_nblocks
);
1470 nilfs_btree_free_path(path
);
1474 static int nilfs_btree_last_key(const struct nilfs_bmap
*bmap
, __u64
*keyp
)
1476 struct nilfs_btree
*btree
;
1477 struct nilfs_btree_path
*path
;
1480 btree
= (struct nilfs_btree
*)bmap
;
1481 path
= nilfs_btree_alloc_path();
1485 ret
= nilfs_btree_do_lookup_last(btree
, path
, keyp
, NULL
);
1487 nilfs_btree_free_path(path
);
1492 static int nilfs_btree_check_delete(struct nilfs_bmap
*bmap
, __u64 key
)
1494 struct buffer_head
*bh
;
1495 struct nilfs_btree
*btree
;
1496 struct nilfs_btree_node
*root
, *node
;
1497 __u64 maxkey
, nextmaxkey
;
1501 btree
= (struct nilfs_btree
*)bmap
;
1502 root
= nilfs_btree_get_root(btree
);
1503 switch (nilfs_btree_height(btree
)) {
1509 nchildren
= nilfs_btree_node_get_nchildren(root
);
1512 ptr
= nilfs_btree_node_get_ptr(btree
, root
, nchildren
- 1);
1513 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1516 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1522 nchildren
= nilfs_btree_node_get_nchildren(node
);
1523 maxkey
= nilfs_btree_node_get_key(node
, nchildren
- 1);
1524 nextmaxkey
= (nchildren
> 1) ?
1525 nilfs_btree_node_get_key(node
, nchildren
- 2) : 0;
1529 return (maxkey
== key
) && (nextmaxkey
< NILFS_BMAP_LARGE_LOW
);
1532 static int nilfs_btree_gather_data(struct nilfs_bmap
*bmap
,
1533 __u64
*keys
, __u64
*ptrs
, int nitems
)
1535 struct buffer_head
*bh
;
1536 struct nilfs_btree
*btree
;
1537 struct nilfs_btree_node
*node
, *root
;
1541 int nchildren
, i
, ret
;
1543 btree
= (struct nilfs_btree
*)bmap
;
1544 root
= nilfs_btree_get_root(btree
);
1545 switch (nilfs_btree_height(btree
)) {
1551 nchildren
= nilfs_btree_node_get_nchildren(root
);
1552 WARN_ON(nchildren
> 1);
1553 ptr
= nilfs_btree_node_get_ptr(btree
, root
, nchildren
- 1);
1554 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1557 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1564 nchildren
= nilfs_btree_node_get_nchildren(node
);
1565 if (nchildren
< nitems
)
1567 dkeys
= nilfs_btree_node_dkeys(node
);
1568 dptrs
= nilfs_btree_node_dptrs(node
, btree
);
1569 for (i
= 0; i
< nitems
; i
++) {
1570 keys
[i
] = le64_to_cpu(dkeys
[i
]);
1571 ptrs
[i
] = le64_to_cpu(dptrs
[i
]);
1581 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap
*bmap
, __u64 key
,
1582 union nilfs_bmap_ptr_req
*dreq
,
1583 union nilfs_bmap_ptr_req
*nreq
,
1584 struct buffer_head
**bhp
,
1585 struct nilfs_bmap_stats
*stats
)
1587 struct buffer_head
*bh
;
1588 struct nilfs_btree
*btree
= (struct nilfs_btree
*)bmap
;
1589 struct inode
*dat
= NULL
;
1592 stats
->bs_nblocks
= 0;
1595 /* cannot find near ptr */
1596 if (NILFS_BMAP_USE_VBN(bmap
)) {
1597 dreq
->bpr_ptr
= nilfs_btree_find_target_v(btree
, NULL
, key
);
1598 dat
= nilfs_bmap_get_dat(bmap
);
1601 ret
= nilfs_bmap_prepare_alloc_ptr(bmap
, dreq
, dat
);
1606 stats
->bs_nblocks
++;
1608 nreq
->bpr_ptr
= dreq
->bpr_ptr
+ 1;
1609 ret
= nilfs_bmap_prepare_alloc_ptr(bmap
, nreq
, dat
);
1613 ret
= nilfs_btree_get_new_block(btree
, nreq
->bpr_ptr
, &bh
);
1618 stats
->bs_nblocks
++;
1626 nilfs_bmap_abort_alloc_ptr(bmap
, nreq
, dat
);
1628 nilfs_bmap_abort_alloc_ptr(bmap
, dreq
, dat
);
1629 stats
->bs_nblocks
= 0;
1635 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap
*bmap
,
1636 __u64 key
, __u64 ptr
,
1637 const __u64
*keys
, const __u64
*ptrs
,
1639 union nilfs_bmap_ptr_req
*dreq
,
1640 union nilfs_bmap_ptr_req
*nreq
,
1641 struct buffer_head
*bh
)
1643 struct nilfs_btree
*btree
= (struct nilfs_btree
*)bmap
;
1644 struct nilfs_btree_node
*node
;
1648 /* free resources */
1649 if (bmap
->b_ops
->bop_clear
!= NULL
)
1650 bmap
->b_ops
->bop_clear(bmap
);
1652 /* ptr must be a pointer to a buffer head. */
1653 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1655 /* convert and insert */
1656 dat
= NILFS_BMAP_USE_VBN(bmap
) ? nilfs_bmap_get_dat(bmap
) : NULL
;
1657 nilfs_btree_init(bmap
);
1659 nilfs_bmap_commit_alloc_ptr(bmap
, dreq
, dat
);
1660 nilfs_bmap_commit_alloc_ptr(bmap
, nreq
, dat
);
1662 /* create child node at level 1 */
1663 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1664 nilfs_btree_node_init(btree
, node
, 0, 1, n
, keys
, ptrs
);
1665 nilfs_btree_node_insert(btree
, node
,
1666 key
, dreq
->bpr_ptr
, n
);
1667 if (!buffer_dirty(bh
))
1668 nilfs_btnode_mark_dirty(bh
);
1669 if (!nilfs_bmap_dirty(bmap
))
1670 nilfs_bmap_set_dirty(bmap
);
1674 /* create root node at level 2 */
1675 node
= nilfs_btree_get_root(btree
);
1676 tmpptr
= nreq
->bpr_ptr
;
1677 nilfs_btree_node_init(btree
, node
, NILFS_BTREE_NODE_ROOT
,
1678 2, 1, &keys
[0], &tmpptr
);
1680 nilfs_bmap_commit_alloc_ptr(bmap
, dreq
, dat
);
1682 /* create root node at level 1 */
1683 node
= nilfs_btree_get_root(btree
);
1684 nilfs_btree_node_init(btree
, node
, NILFS_BTREE_NODE_ROOT
,
1686 nilfs_btree_node_insert(btree
, node
,
1687 key
, dreq
->bpr_ptr
, n
);
1688 if (!nilfs_bmap_dirty(bmap
))
1689 nilfs_bmap_set_dirty(bmap
);
1692 if (NILFS_BMAP_USE_VBN(bmap
))
1693 nilfs_btree_set_target_v(btree
, key
, dreq
->bpr_ptr
);
1697 * nilfs_btree_convert_and_insert -
1705 int nilfs_btree_convert_and_insert(struct nilfs_bmap
*bmap
,
1706 __u64 key
, __u64 ptr
,
1707 const __u64
*keys
, const __u64
*ptrs
, int n
)
1709 struct buffer_head
*bh
;
1710 union nilfs_bmap_ptr_req dreq
, nreq
, *di
, *ni
;
1711 struct nilfs_bmap_stats stats
;
1714 if (n
+ 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1717 } else if ((n
+ 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1718 1 << bmap
->b_inode
->i_blkbits
)) {
1727 ret
= nilfs_btree_prepare_convert_and_insert(bmap
, key
, di
, ni
, &bh
,
1731 nilfs_btree_commit_convert_and_insert(bmap
, key
, ptr
, keys
, ptrs
, n
,
1733 nilfs_bmap_add_blocks(bmap
, stats
.bs_nblocks
);
1737 static int nilfs_btree_propagate_p(struct nilfs_btree
*btree
,
1738 struct nilfs_btree_path
*path
,
1740 struct buffer_head
*bh
)
1742 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1743 !buffer_dirty(path
[level
].bp_bh
))
1744 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1749 static int nilfs_btree_prepare_update_v(struct nilfs_btree
*btree
,
1750 struct nilfs_btree_path
*path
,
1751 int level
, struct inode
*dat
)
1753 struct nilfs_btree_node
*parent
;
1756 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1757 path
[level
].bp_oldreq
.bpr_ptr
=
1758 nilfs_btree_node_get_ptr(btree
, parent
,
1759 path
[level
+ 1].bp_index
);
1760 path
[level
].bp_newreq
.bpr_ptr
= path
[level
].bp_oldreq
.bpr_ptr
+ 1;
1761 ret
= nilfs_dat_prepare_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1762 &path
[level
].bp_newreq
.bpr_req
);
1766 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1767 path
[level
].bp_ctxt
.oldkey
= path
[level
].bp_oldreq
.bpr_ptr
;
1768 path
[level
].bp_ctxt
.newkey
= path
[level
].bp_newreq
.bpr_ptr
;
1769 path
[level
].bp_ctxt
.bh
= path
[level
].bp_bh
;
1770 ret
= nilfs_btnode_prepare_change_key(
1771 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1772 &path
[level
].bp_ctxt
);
1774 nilfs_dat_abort_update(dat
,
1775 &path
[level
].bp_oldreq
.bpr_req
,
1776 &path
[level
].bp_newreq
.bpr_req
);
1784 static void nilfs_btree_commit_update_v(struct nilfs_btree
*btree
,
1785 struct nilfs_btree_path
*path
,
1786 int level
, struct inode
*dat
)
1788 struct nilfs_btree_node
*parent
;
1790 nilfs_dat_commit_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1791 &path
[level
].bp_newreq
.bpr_req
,
1792 btree
->bt_bmap
.b_ptr_type
== NILFS_BMAP_PTR_VS
);
1794 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1795 nilfs_btnode_commit_change_key(
1796 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1797 &path
[level
].bp_ctxt
);
1798 path
[level
].bp_bh
= path
[level
].bp_ctxt
.bh
;
1800 set_buffer_nilfs_volatile(path
[level
].bp_bh
);
1802 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1803 nilfs_btree_node_set_ptr(btree
, parent
, path
[level
+ 1].bp_index
,
1804 path
[level
].bp_newreq
.bpr_ptr
);
1807 static void nilfs_btree_abort_update_v(struct nilfs_btree
*btree
,
1808 struct nilfs_btree_path
*path
,
1809 int level
, struct inode
*dat
)
1811 nilfs_dat_abort_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1812 &path
[level
].bp_newreq
.bpr_req
);
1813 if (buffer_nilfs_node(path
[level
].bp_bh
))
1814 nilfs_btnode_abort_change_key(
1815 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1816 &path
[level
].bp_ctxt
);
1819 static int nilfs_btree_prepare_propagate_v(struct nilfs_btree
*btree
,
1820 struct nilfs_btree_path
*path
,
1821 int minlevel
, int *maxlevelp
,
1827 if (!buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1828 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1832 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1833 !buffer_dirty(path
[level
].bp_bh
)) {
1835 WARN_ON(buffer_nilfs_volatile(path
[level
].bp_bh
));
1836 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1842 *maxlevelp
= level
- 1;
1847 while (--level
> minlevel
)
1848 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
1849 if (!buffer_nilfs_volatile(path
[level
].bp_bh
))
1850 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
1854 static void nilfs_btree_commit_propagate_v(struct nilfs_btree
*btree
,
1855 struct nilfs_btree_path
*path
,
1856 int minlevel
, int maxlevel
,
1857 struct buffer_head
*bh
,
1862 if (!buffer_nilfs_volatile(path
[minlevel
].bp_bh
))
1863 nilfs_btree_commit_update_v(btree
, path
, minlevel
, dat
);
1865 for (level
= minlevel
+ 1; level
<= maxlevel
; level
++)
1866 nilfs_btree_commit_update_v(btree
, path
, level
, dat
);
1869 static int nilfs_btree_propagate_v(struct nilfs_btree
*btree
,
1870 struct nilfs_btree_path
*path
,
1871 int level
, struct buffer_head
*bh
)
1873 int maxlevel
= 0, ret
;
1874 struct nilfs_btree_node
*parent
;
1875 struct inode
*dat
= nilfs_bmap_get_dat(&btree
->bt_bmap
);
1879 path
[level
].bp_bh
= bh
;
1880 ret
= nilfs_btree_prepare_propagate_v(btree
, path
, level
, &maxlevel
,
1885 if (buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1886 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1887 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1888 path
[level
+ 1].bp_index
);
1889 ret
= nilfs_dat_mark_dirty(dat
, ptr
);
1894 nilfs_btree_commit_propagate_v(btree
, path
, level
, maxlevel
, bh
, dat
);
1897 brelse(path
[level
].bp_bh
);
1898 path
[level
].bp_bh
= NULL
;
1902 static int nilfs_btree_propagate(const struct nilfs_bmap
*bmap
,
1903 struct buffer_head
*bh
)
1905 struct nilfs_btree
*btree
;
1906 struct nilfs_btree_path
*path
;
1907 struct nilfs_btree_node
*node
;
1911 WARN_ON(!buffer_dirty(bh
));
1913 btree
= (struct nilfs_btree
*)bmap
;
1914 path
= nilfs_btree_alloc_path();
1918 if (buffer_nilfs_node(bh
)) {
1919 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1920 key
= nilfs_btree_node_get_key(node
, 0);
1921 level
= nilfs_btree_node_get_level(node
);
1923 key
= nilfs_bmap_data_get_key(bmap
, bh
);
1924 level
= NILFS_BTREE_LEVEL_DATA
;
1927 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1);
1929 if (unlikely(ret
== -ENOENT
))
1930 printk(KERN_CRIT
"%s: key = %llu, level == %d\n",
1931 __func__
, (unsigned long long)key
, level
);
1935 ret
= NILFS_BMAP_USE_VBN(bmap
) ?
1936 nilfs_btree_propagate_v(btree
, path
, level
, bh
) :
1937 nilfs_btree_propagate_p(btree
, path
, level
, bh
);
1940 nilfs_btree_free_path(path
);
1945 static int nilfs_btree_propagate_gc(const struct nilfs_bmap
*bmap
,
1946 struct buffer_head
*bh
)
1948 return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap
), bh
->b_blocknr
);
1951 static void nilfs_btree_add_dirty_buffer(struct nilfs_btree
*btree
,
1952 struct list_head
*lists
,
1953 struct buffer_head
*bh
)
1955 struct list_head
*head
;
1956 struct buffer_head
*cbh
;
1957 struct nilfs_btree_node
*node
, *cnode
;
1962 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1963 key
= nilfs_btree_node_get_key(node
, 0);
1964 level
= nilfs_btree_node_get_level(node
);
1965 if (level
< NILFS_BTREE_LEVEL_NODE_MIN
||
1966 level
>= NILFS_BTREE_LEVEL_MAX
) {
1969 "%s: invalid btree level: %d (key=%llu, ino=%lu, "
1971 __func__
, level
, (unsigned long long)key
,
1972 NILFS_BMAP_I(&btree
->bt_bmap
)->vfs_inode
.i_ino
,
1973 (unsigned long long)bh
->b_blocknr
);
1977 list_for_each(head
, &lists
[level
]) {
1978 cbh
= list_entry(head
, struct buffer_head
, b_assoc_buffers
);
1979 cnode
= (struct nilfs_btree_node
*)cbh
->b_data
;
1980 ckey
= nilfs_btree_node_get_key(cnode
, 0);
1984 list_add_tail(&bh
->b_assoc_buffers
, head
);
1987 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap
*bmap
,
1988 struct list_head
*listp
)
1990 struct nilfs_btree
*btree
= (struct nilfs_btree
*)bmap
;
1991 struct address_space
*btcache
= &NILFS_BMAP_I(bmap
)->i_btnode_cache
;
1992 struct list_head lists
[NILFS_BTREE_LEVEL_MAX
];
1993 struct pagevec pvec
;
1994 struct buffer_head
*bh
, *head
;
1998 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1999 level
< NILFS_BTREE_LEVEL_MAX
;
2001 INIT_LIST_HEAD(&lists
[level
]);
2003 pagevec_init(&pvec
, 0);
2005 while (pagevec_lookup_tag(&pvec
, btcache
, &index
, PAGECACHE_TAG_DIRTY
,
2007 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
2008 bh
= head
= page_buffers(pvec
.pages
[i
]);
2010 if (buffer_dirty(bh
))
2011 nilfs_btree_add_dirty_buffer(btree
,
2013 } while ((bh
= bh
->b_this_page
) != head
);
2015 pagevec_release(&pvec
);
2019 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2020 level
< NILFS_BTREE_LEVEL_MAX
;
2022 list_splice_tail(&lists
[level
], listp
);
2025 static int nilfs_btree_assign_p(struct nilfs_btree
*btree
,
2026 struct nilfs_btree_path
*path
,
2028 struct buffer_head
**bh
,
2030 union nilfs_binfo
*binfo
)
2032 struct nilfs_btree_node
*parent
;
2037 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
2038 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
2039 path
[level
+ 1].bp_index
);
2040 if (buffer_nilfs_node(*bh
)) {
2041 path
[level
].bp_ctxt
.oldkey
= ptr
;
2042 path
[level
].bp_ctxt
.newkey
= blocknr
;
2043 path
[level
].bp_ctxt
.bh
= *bh
;
2044 ret
= nilfs_btnode_prepare_change_key(
2045 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
2046 &path
[level
].bp_ctxt
);
2049 nilfs_btnode_commit_change_key(
2050 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
2051 &path
[level
].bp_ctxt
);
2052 *bh
= path
[level
].bp_ctxt
.bh
;
2055 nilfs_btree_node_set_ptr(btree
, parent
,
2056 path
[level
+ 1].bp_index
, blocknr
);
2058 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2059 /* on-disk format */
2060 binfo
->bi_dat
.bi_blkoff
= cpu_to_le64(key
);
2061 binfo
->bi_dat
.bi_level
= level
;
2066 static int nilfs_btree_assign_v(struct nilfs_btree
*btree
,
2067 struct nilfs_btree_path
*path
,
2069 struct buffer_head
**bh
,
2071 union nilfs_binfo
*binfo
)
2073 struct nilfs_btree_node
*parent
;
2074 struct inode
*dat
= nilfs_bmap_get_dat(&btree
->bt_bmap
);
2077 union nilfs_bmap_ptr_req req
;
2080 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
2081 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
2082 path
[level
+ 1].bp_index
);
2084 ret
= nilfs_dat_prepare_start(dat
, &req
.bpr_req
);
2087 nilfs_dat_commit_start(dat
, &req
.bpr_req
, blocknr
);
2089 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2090 /* on-disk format */
2091 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64(ptr
);
2092 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2097 static int nilfs_btree_assign(struct nilfs_bmap
*bmap
,
2098 struct buffer_head
**bh
,
2100 union nilfs_binfo
*binfo
)
2102 struct nilfs_btree
*btree
;
2103 struct nilfs_btree_path
*path
;
2104 struct nilfs_btree_node
*node
;
2108 btree
= (struct nilfs_btree
*)bmap
;
2109 path
= nilfs_btree_alloc_path();
2113 if (buffer_nilfs_node(*bh
)) {
2114 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2115 key
= nilfs_btree_node_get_key(node
, 0);
2116 level
= nilfs_btree_node_get_level(node
);
2118 key
= nilfs_bmap_data_get_key(bmap
, *bh
);
2119 level
= NILFS_BTREE_LEVEL_DATA
;
2122 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1);
2124 WARN_ON(ret
== -ENOENT
);
2128 ret
= NILFS_BMAP_USE_VBN(bmap
) ?
2129 nilfs_btree_assign_v(btree
, path
, level
, bh
, blocknr
, binfo
) :
2130 nilfs_btree_assign_p(btree
, path
, level
, bh
, blocknr
, binfo
);
2133 nilfs_btree_free_path(path
);
2138 static int nilfs_btree_assign_gc(struct nilfs_bmap
*bmap
,
2139 struct buffer_head
**bh
,
2141 union nilfs_binfo
*binfo
)
2143 struct nilfs_btree_node
*node
;
2147 ret
= nilfs_dat_move(nilfs_bmap_get_dat(bmap
), (*bh
)->b_blocknr
,
2152 if (buffer_nilfs_node(*bh
)) {
2153 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2154 key
= nilfs_btree_node_get_key(node
, 0);
2156 key
= nilfs_bmap_data_get_key(bmap
, *bh
);
2158 /* on-disk format */
2159 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64((*bh
)->b_blocknr
);
2160 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2165 static int nilfs_btree_mark(struct nilfs_bmap
*bmap
, __u64 key
, int level
)
2167 struct buffer_head
*bh
;
2168 struct nilfs_btree
*btree
;
2169 struct nilfs_btree_path
*path
;
2173 btree
= (struct nilfs_btree
*)bmap
;
2174 path
= nilfs_btree_alloc_path();
2178 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
+ 1);
2180 WARN_ON(ret
== -ENOENT
);
2183 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
2185 WARN_ON(ret
== -ENOENT
);
2189 if (!buffer_dirty(bh
))
2190 nilfs_btnode_mark_dirty(bh
);
2192 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
2193 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
2196 nilfs_btree_free_path(path
);
2200 static const struct nilfs_bmap_operations nilfs_btree_ops
= {
2201 .bop_lookup
= nilfs_btree_lookup
,
2202 .bop_lookup_contig
= nilfs_btree_lookup_contig
,
2203 .bop_insert
= nilfs_btree_insert
,
2204 .bop_delete
= nilfs_btree_delete
,
2207 .bop_propagate
= nilfs_btree_propagate
,
2209 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2211 .bop_assign
= nilfs_btree_assign
,
2212 .bop_mark
= nilfs_btree_mark
,
2214 .bop_last_key
= nilfs_btree_last_key
,
2215 .bop_check_insert
= NULL
,
2216 .bop_check_delete
= nilfs_btree_check_delete
,
2217 .bop_gather_data
= nilfs_btree_gather_data
,
2220 static const struct nilfs_bmap_operations nilfs_btree_ops_gc
= {
2222 .bop_lookup_contig
= NULL
,
2227 .bop_propagate
= nilfs_btree_propagate_gc
,
2229 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2231 .bop_assign
= nilfs_btree_assign_gc
,
2234 .bop_last_key
= NULL
,
2235 .bop_check_insert
= NULL
,
2236 .bop_check_delete
= NULL
,
2237 .bop_gather_data
= NULL
,
2240 int nilfs_btree_init(struct nilfs_bmap
*bmap
)
2242 bmap
->b_ops
= &nilfs_btree_ops
;
2246 void nilfs_btree_init_gc(struct nilfs_bmap
*bmap
)
2248 bmap
->b_ops
= &nilfs_btree_ops_gc
;