// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
        u64 ret = 0;
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                ret += qgroup->rsv.values[i];

        return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
        if (type == BTRFS_QGROUP_RSV_DATA)
                return "data";
        if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
                return "meta_pertrans";
        if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
                return "meta_prealloc";
        return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
                           struct btrfs_qgroup *qgroup, u64 num_bytes,
                           enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
        qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
                               struct btrfs_qgroup *qgroup, u64 num_bytes,
                               enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
        if (qgroup->rsv.values[type] >= num_bytes) {
                qgroup->rsv.values[type] -= num_bytes;
                return;
        }
#ifdef CONFIG_BTRFS_DEBUG
        WARN_RATELIMIT(1,
                "qgroup %llu %s reserved space underflow, have %llu to free %llu",
                qgroup->qgroupid, qgroup_rsv_type_str(type),
                qgroup->rsv.values[type], num_bytes);
#endif
        qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
                                     struct btrfs_qgroup *dest,
                                     struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
                                         struct btrfs_qgroup *dest,
                                         struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}
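
/*
 * Illustrative sketch (not part of the original file): old_refcnt and
 * new_refcnt double as "last seq seen" watermarks, so they never need to be
 * zeroed between accounting rounds.  With seq = 100:
 *
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);  // old_refcnt: -> 101
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);  // old_refcnt: -> 102
 *      btrfs_qgroup_get_old_refcnt(qg, 100);        // returns 2
 *
 * A qgroup untouched in this round still has old_refcnt < 100, so the getter
 * returns 0 without any explicit reset; bumping fs_info->qgroup_seq past the
 * largest count starts the next round cleanly.
 */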

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
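
/*
 * Illustrative sketch (not part of the original file): making qgroup 1/100
 * the parent of 0/257 allocates one btrfs_qgroup_list node that is spliced
 * into both in-memory qgroups:
 *
 *      member (0/257)                    parent (1/100)
 *        ->groups  <-> list->next_group    ->members <-> list->next_member
 *                      list->group  = parent
 *                      list->member = member
 *
 * So walking member->groups yields all direct parents, walking
 * parent->members yields all direct children, with one allocation per edge.
 */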

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
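
/*
 * Illustrative layout sketch (not part of the original file): everything the
 * two passes above read lives in the quota tree, keyed as
 *
 *      (0,   BTRFS_QGROUP_STATUS_KEY,   0)             one global status item
 *      (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid)      usage counters
 *      (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid)      limits
 *      (src, BTRFS_QGROUP_RELATION_KEY, dst)           stored both directions
 *
 * Pass 1 scans from key (0, 0, 0) and therefore sees the status/info/limit
 * items first; pass 2 restarts at the relation keys, and only the
 * member->parent direction (objectid <= offset) is used to rebuild the
 * in-memory config.
 */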

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * the first two are in single-threaded paths. And for the third one, we have
 * set quota_root to be null with qgroup_lock held before, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() when unmounting
         * filesystem and disabling quota, so we set qgroup_ulist
         * to be null here to avoid double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
                                    u64 dst)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
                                    u64 dst)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        int ret;
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_root *quota_root = trans->fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * delete the leaf one by one
                 * since the whole tree is going
                 * to be deleted.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        struct btrfs_trans_handle *trans = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root)
                goto out;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * 1 for quota root item
         * 1 for BTRFS_QGROUP_STATUS item
         *
         * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
         * per subvolume. However those are not currently reserved since it
         * would be a lot of overkill.
         */
        trans = btrfs_start_transaction(tree_root, 2);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                trans = NULL;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                btrfs_abort_transaction(trans, ret);
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                goto out_free_path;
                        }

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                btrfs_abort_transaction(trans, ret);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out_free_path;
                }
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }

        ret = btrfs_commit_transaction(trans);
        trans = NULL;
        if (ret)
                goto out_free_path;

        /*
         * Set quota enabled flag after committing the transaction, to avoid
         * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
         * creation.
         */
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);

        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
                qgroup_rescan_zero_tracking(fs_info);
                btrfs_queue_work(fs_info->qgroup_rescan_workers,
                                 &fs_info->qgroup_rescan_work);
        }

out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                if (trans)
                        btrfs_end_transaction(trans);
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_trans_handle *trans = NULL;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;

        /*
         * 1 For the root item
         *
         * We should also reserve enough items for the quota tree deletion in
         * btrfs_clean_quota_tree but this is not done.
         */
        trans = btrfs_start_transaction(fs_info->tree_root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto end_trans;
        }

        ret = btrfs_del_root(trans, &quota_root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto end_trans;
        }

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        btrfs_clean_tree_block(quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);

end_trans:
        ret = btrfs_end_transaction(trans);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must be added to or
 * released from the parent as well; otherwise, when the child later releases
 * its reservation, the parent would underflow its reservation (in the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    struct btrfs_qgroup *src, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        u64 num_bytes = src->excl;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;

        if (sign > 0)
                qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
        else
                qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                qgroup_to_aux(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = unode_aux_to_qgroup(unode);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0)
                        qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
                else
                        qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        qgroup_to_aux(glist->group),
                                        GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}
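
/*
 * Illustrative walk-through (not part of the original file): assume 0/257
 * has rfer == excl == 1MiB and is being added (sign = +1) under 1/100,
 * which itself is a member of 2/200.  The function then:
 *
 *      1) adds 1MiB to rfer/excl of 1/100 (ref_root) and migrates the
 *         child's per-type reservations into it,
 *      2) collects 2/200 via 1/100->groups into @tmp,
 *      3) repeats the same adjustment for 2/200.
 *
 * With sign = -1 the same walk subtracts, which is why callers may only use
 * this when the child's extents are all exclusive (excl == rfer); shared
 * extents would need a full rescan instead.
 */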

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 when a full rescan is needed
 * and the INCONSISTENT flag has been set.
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                              u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* check if such a qgroup relation exists first */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                                 u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* check if such a qgroup relation exists first */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, src, dst);
        err = del_qgroup_relation_item(trans, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
                              u64 dst)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        /* Check if there are no children of this qgroup */
        if (!list_empty(&qgroup->members)) {
                ret = -EBUSY;
                goto out;
        }

        ret = del_qgroup_item(trans, qgroupid);
        if (ret && ret != -ENOENT)
                goto out;

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, qgroupid,
                                            list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /* Sometimes we would want to clear the limit on this qgroup.
         * To meet this requirement, we treat the -1 as a special value
         * which tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                           qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
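
/*
 * Illustrative caller sketch (hypothetical values, not from the original
 * file): clearing a previously configured referenced-bytes limit goes
 * through the same path with max_rfer set to -1 (CLEAR_VALUE):
 *
 *      struct btrfs_qgroup_limit limit = {
 *              .flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *              .max_rfer = (u64)-1,
 *      };
 *
 *      ret = btrfs_limit_qgroup(trans, qgroupid, &limit);
 *
 * The function drops the flag from both qgroup->lim_flags and limit->flags
 * before OR-ing the remainder back in, so a cleared limit is not
 * accidentally re-armed by the final "qgroup->lim_flags |= limit->flags".
 */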

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                struct btrfs_qgroup_extent_record *record)
{
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;

        lockdep_assert_held(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record);

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
                                 node);
                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                } else {
                        if (record->data_rsv && !entry->data_rsv) {
                                entry->data_rsv = record->data_rsv;
                                entry->data_rsv_refroot =
                                        record->data_rsv_refroot;
                        }
                        return 1;
                }
        }

        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
        return 0;
}

int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
                                   struct btrfs_qgroup_extent_record *qrecord)
{
        struct ulist *old_root;
        u64 bytenr = qrecord->bytenr;
        int ret;

        ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
        if (ret < 0) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
                        ret);
                return 0;
        }

        /*
         * Here we don't need to get the lock of
         * trans->transaction->delayed_refs, since inserted qrecord won't
         * be deleted, only qrecord->node may be modified (new qrecord insert)
         *
         * So modifying qrecord->old_roots is safe here
         */
        qrecord->old_roots = old_root;
        return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
                              u64 num_bytes, gfp_t gfp_flag)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
            || bytenr == 0 || num_bytes == 0)
                return 0;
        record = kzalloc(sizeof(*record), gfp_flag);
        if (!record)
                return -ENOMEM;

        delayed_refs = &trans->transaction->delayed_refs;
        record->bytenr = bytenr;
        record->num_bytes = num_bytes;
        record->old_roots = NULL;

        spin_lock(&delayed_refs->lock);
        ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
        spin_unlock(&delayed_refs->lock);
        if (ret > 0) {
                kfree(record);
                return 0;
        }
        return btrfs_qgroup_trace_extent_post(fs_info, record);
}
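
/*
 * Illustrative usage sketch (not from the original file): tracing one
 * metadata block for delayed accounting at transaction commit:
 *
 *      ret = btrfs_qgroup_trace_extent(trans, eb->start,
 *                                      fs_info->nodesize, GFP_NOFS);
 *
 * A return of 1 from the _nolock insert means an identical record already
 * sits in the dirty_extent_root rb-tree, so the duplicate is freed and the
 * (potentially expensive) old-roots lookup in _post is skipped.
 */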

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
                                  struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int nr = btrfs_header_nritems(eb);
        int i, extent_type, ret;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u64 bytenr, num_bytes;

        /* We can be called directly from walk_up_proc() */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        for (i = 0; i < nr; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);

                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;

                fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
                /* filter out non qgroup-accountable extents  */
                extent_type = btrfs_file_extent_type(eb, fi);

                if (extent_type == BTRFS_FILE_EXTENT_INLINE)
                        continue;

                bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
                if (!bytenr)
                        continue;

                num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

                ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
                                                GFP_NOFS);
                if (ret)
                        return ret;
        }
        cond_resched();
        return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
        int level = 0;
        int nr, slot;
        struct extent_buffer *eb;

        if (root_level == 0)
                return 1;

        while (level <= root_level) {
                eb = path->nodes[level];
                nr = btrfs_header_nritems(eb);
                path->slots[level]++;
                slot = path->slots[level];
                if (slot >= nr || level == 0) {
                        /*
                         * Don't free the root - we will detect this
                         * condition after our loop and return a
                         * positive value for caller to stop walking the tree.
                         */
                        if (level != root_level) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;

                                free_extent_buffer(eb);
                                path->nodes[level] = NULL;
                                path->slots[level] = 0;
                        }
                } else {
                        /*
                         * We have a valid slot to walk back down
                         * from. Stop here so caller can process these
                         * new nodes.
                         */
                        break;
                }

                level++;
        }

        eb = path->nodes[root_level];
        if (path->slots[root_level] >= btrfs_header_nritems(eb))
                return 1;

        return 0;
}
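
/*
 * Illustrative trace (not from the original file): with root_level = 2 and
 * the leaf plus its parent both exhausted, the walk frees the leaf (level 0)
 * and the interior node (level 1), then bumps slots[2].  The caller
 * re-enters its walk_down loop, sees nodes[1] == NULL and reads the next
 * child from the new root slot; only when slots[2] runs past
 * nritems(root) does this helper return 1 to end the search.
 */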

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in the highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO  OO OO  OO                     OO  OO NN  NN
 *                 (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in the reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
                                    struct extent_buffer *src_eb,
                                    struct btrfs_path *dst_path,
                                    int dst_level, int root_level,
                                    bool trace_leaf)
{
        struct btrfs_key key;
        struct btrfs_path *src_path;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        u32 nodesize = fs_info->nodesize;
        int cur_level = root_level;
        int ret;

        BUG_ON(dst_level > root_level);
        /* Level mismatch */
        if (btrfs_header_level(src_eb) != root_level)
                return -EINVAL;

        src_path = btrfs_alloc_path();
        if (!src_path) {
                ret = -ENOMEM;
                goto out;
        }

        if (dst_level)
                btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
        else
                btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

        /* For src_path */
        extent_buffer_get(src_eb);
        src_path->nodes[root_level] = src_eb;
        src_path->slots[root_level] = dst_path->slots[root_level];
        src_path->locks[root_level] = 0;

        /* A simplified version of btrfs_search_slot() */
        while (cur_level >= dst_level) {
                struct btrfs_key src_key;
                struct btrfs_key dst_key;

                if (src_path->nodes[cur_level] == NULL) {
                        struct btrfs_key first_key;
                        struct extent_buffer *eb;
                        int parent_slot;
                        u64 child_gen;
                        u64 child_bytenr;

                        eb = src_path->nodes[cur_level + 1];
                        parent_slot = src_path->slots[cur_level + 1];
                        child_bytenr = btrfs_node_blockptr(eb, parent_slot);
                        child_gen = btrfs_node_ptr_generation(eb, parent_slot);
                        btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

                        eb = read_tree_block(fs_info, child_bytenr, child_gen,
                                             cur_level, &first_key);
                        if (IS_ERR(eb)) {
                                ret = PTR_ERR(eb);
                                goto out;
                        } else if (!extent_buffer_uptodate(eb)) {
                                free_extent_buffer(eb);
                                ret = -EIO;
                                goto out;
                        }

                        src_path->nodes[cur_level] = eb;

                        btrfs_tree_read_lock(eb);
                        btrfs_set_lock_blocking_read(eb);
                        src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
                }

                src_path->slots[cur_level] = dst_path->slots[cur_level];
                if (cur_level) {
                        btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
                                        &dst_key, dst_path->slots[cur_level]);
                        btrfs_node_key_to_cpu(src_path->nodes[cur_level],
                                        &src_key, src_path->slots[cur_level]);
                } else {
                        btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
                                        &dst_key, dst_path->slots[cur_level]);
                        btrfs_item_key_to_cpu(src_path->nodes[cur_level],
                                        &src_key, src_path->slots[cur_level]);
                }
                /* Content mismatch, something went wrong */
                if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
                        ret = -ENOENT;
                        goto out;
                }
                cur_level--;
        }

        /*
         * Now both @dst_path and @src_path have been populated, record the tree
         * blocks for qgroup accounting.
         */
        ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
                        nodesize, GFP_NOFS);
        if (ret < 0)
                goto out;
        ret = btrfs_qgroup_trace_extent(trans,
                        dst_path->nodes[dst_level]->start,
                        nodesize, GFP_NOFS);
        if (ret < 0)
                goto out;

        /* Record leaf file extents */
        if (dst_level == 0 && trace_leaf) {
                ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
                if (ret < 0)
                        goto out;
                ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
        }
out:
        btrfs_free_path(src_path);
        return ret;
}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of the reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *
 *       reloc tree
 * L2         NN (a)
 *          /     \
 * L1    OO        NN (b)
 *      /  \      /  \
 *    OO  OO    OO  NN
 *              (c)  (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * the above tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks like OO(c) will be skipped, as a tree
 * block swap won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
                                           struct extent_buffer *src_eb,
                                           struct btrfs_path *dst_path,
                                           int cur_level, int root_level,
                                           u64 last_snapshot, bool trace_leaf)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct extent_buffer *eb;
        bool need_cleanup = false;
        int ret = 0;
        int i;

        /* Level sanity check */
        if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
            root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
            root_level < cur_level) {
                btrfs_err_rl(fs_info,
                        "%s: bad levels, cur_level=%d root_level=%d",
                        __func__, cur_level, root_level);
                return -EUCLEAN;
        }

        /* Read the tree block if needed */
        if (dst_path->nodes[cur_level] == NULL) {
                struct btrfs_key first_key;
                int parent_slot;
                u64 child_gen;
                u64 child_bytenr;

                /*
                 * dst_path->nodes[root_level] must be initialized before
                 * calling this function.
                 */
                if (cur_level == root_level) {
                        btrfs_err_rl(fs_info,
        "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
                                __func__, root_level, root_level, cur_level);
                        return -EUCLEAN;
                }

                /*
                 * We need to get child blockptr/gen from parent before we can
                 * read it.
                 */
                eb = dst_path->nodes[cur_level + 1];
                parent_slot = dst_path->slots[cur_level + 1];
                child_bytenr = btrfs_node_blockptr(eb, parent_slot);
                child_gen = btrfs_node_ptr_generation(eb, parent_slot);
                btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

                /* This node is old, no need to trace */
                if (child_gen < last_snapshot)
                        goto out;

                eb = read_tree_block(fs_info, child_bytenr, child_gen,
                                     cur_level, &first_key);
                if (IS_ERR(eb)) {
                        ret = PTR_ERR(eb);
                        goto out;
                } else if (!extent_buffer_uptodate(eb)) {
                        free_extent_buffer(eb);
                        ret = -EIO;
                        goto out;
                }

                dst_path->nodes[cur_level] = eb;
                dst_path->slots[cur_level] = 0;

                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_read(eb);
                dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
                need_cleanup = true;
        }

        /* Now record this tree block and its counter part for qgroups */
        ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
                                       root_level, trace_leaf);
        if (ret < 0)
                goto cleanup;

        eb = dst_path->nodes[cur_level];

        if (cur_level > 0) {
                /* Iterate all child tree blocks */
                for (i = 0; i < btrfs_header_nritems(eb); i++) {
                        /* Skip old tree blocks as they won't be swapped */
                        if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
                                continue;
                        dst_path->slots[cur_level] = i;

                        /* Recursive call (at most 7 times) */
                        ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
                                        dst_path, cur_level - 1, root_level,
                                        last_snapshot, trace_leaf);
                        if (ret < 0)
                                goto cleanup;
                }
        }

cleanup:
        if (need_cleanup) {
                /* Clean up */
                btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
                                     dst_path->locks[cur_level]);
                free_extent_buffer(dst_path->nodes[cur_level]);
                dst_path->nodes[cur_level] = NULL;
                dst_path->slots[cur_level] = 0;
                dst_path->locks[cur_level] = 0;
        }
out:
        return ret;
}

static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
                                struct extent_buffer *src_eb,
                                struct extent_buffer *dst_eb,
                                u64 last_snapshot, bool trace_leaf)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_path *dst_path = NULL;
        int level;
        int ret;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        /* Wrong parameter order */
        if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
                btrfs_err_rl(fs_info,
                "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
                             btrfs_header_generation(src_eb),
                             btrfs_header_generation(dst_eb));
                return -EUCLEAN;
        }

        if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
                ret = -EIO;
                goto out;
        }

        level = btrfs_header_level(dst_eb);
        dst_path = btrfs_alloc_path();
        if (!dst_path) {
                ret = -ENOMEM;
                goto out;
        }
        /* For dst_path */
        extent_buffer_get(dst_eb);
        dst_path->nodes[level] = dst_eb;
        dst_path->slots[level] = 0;
        dst_path->locks[level] = 0;

        /* Do the generation-aware depth-first search */
        ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
                                              level, last_snapshot, trace_leaf);
        if (ret < 0)
                goto out;
        ret = 0;

out:
        btrfs_free_path(dst_path);
        if (ret < 0)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}

int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
                               struct extent_buffer *root_eb,
                               u64 root_gen, int root_level)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret = 0;
        int level;
        struct extent_buffer *eb = root_eb;
        struct btrfs_path *path = NULL;

        BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
        BUG_ON(root_eb == NULL);

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        if (!extent_buffer_uptodate(root_eb)) {
                ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
                if (ret)
                        goto out;
        }

        if (root_level == 0) {
                ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * Walk down the tree.  Missing extent blocks are filled in as
         * we go. Metadata is accounted every time we read a new
         * extent block.
         *
         * When we reach a leaf, we account for file extent items in it,
         * walk back up the tree (adjusting slot pointers as we go)
         * and restart the search process.
         */
        extent_buffer_get(root_eb); /* For path */
        path->nodes[root_level] = root_eb;
        path->slots[root_level] = 0;
        path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
        level = root_level;
        while (level >= 0) {
                if (path->nodes[level] == NULL) {
                        struct btrfs_key first_key;
                        int parent_slot;
                        u64 child_gen;
                        u64 child_bytenr;

                        /*
                         * We need to get child blockptr/gen from parent before
                         * we can read the block.
                         */
                        eb = path->nodes[level + 1];
                        parent_slot = path->slots[level + 1];
                        child_bytenr = btrfs_node_blockptr(eb, parent_slot);
                        child_gen = btrfs_node_ptr_generation(eb, parent_slot);
                        btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

                        eb = read_tree_block(fs_info, child_bytenr, child_gen,
                                             level, &first_key);
                        if (IS_ERR(eb)) {
                                ret = PTR_ERR(eb);
                                goto out;
                        } else if (!extent_buffer_uptodate(eb)) {
                                free_extent_buffer(eb);
                                ret = -EIO;
                                goto out;
                        }

                        path->nodes[level] = eb;
                        path->slots[level] = 0;

                        btrfs_tree_read_lock(eb);
                        btrfs_set_lock_blocking_read(eb);
                        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

                        ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
                                                        fs_info->nodesize,
                                                        GFP_NOFS);
                        if (ret)
                                goto out;
                }

                if (level == 0) {
                        ret = btrfs_qgroup_trace_leaf_items(trans,
                                                            path->nodes[level]);
                        if (ret)
                                goto out;

                        /* Nonzero return here means we completed our search */
                        ret = adjust_slots_upwards(path, root_level);
                        if (ret)
                                break;

                        /* Restart search with new slots */
                        goto walk_down;
                }

                level--;
        }

        ret = 0;
out:
        btrfs_free_path(path);

        return ret;
}

#define UPDATE_NEW      0
#define UPDATE_OLD      1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
                                struct ulist *roots, struct ulist *tmp,
                                struct ulist *qgroups, u64 seq, int update_old)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct ulist_node *tmp_unode;
        struct ulist_iterator tmp_uiter;
        struct btrfs_qgroup *qg;
        int ret = 0;

        if (!roots)
                return 0;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(roots, &uiter))) {
                qg = find_qgroup_rb(fs_info, unode->val);
                if (!qg)
                        continue;

                ulist_reinit(tmp);
                ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
                                GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ULIST_ITER_INIT(&tmp_uiter);
                while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
                        struct btrfs_qgroup_list *glist;

                        qg = unode_aux_to_qgroup(tmp_unode);
                        if (update_old)
                                btrfs_qgroup_update_old_refcnt(qg, seq, 1);
                        else
                                btrfs_qgroup_update_new_refcnt(qg, seq, 1);
                        list_for_each_entry(glist, &qg->groups, next_group) {
                                ret = ulist_add(qgroups, glist->group->qgroupid,
                                                qgroup_to_aux(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                                ret = ulist_add(tmp, glist->group->qgroupid,
                                                qgroup_to_aux(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                        }
                }
        }
        return 0;
}

/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *      |       A       |       !A      |
 *  -------------------------------------
 *  B   |       *       |       -       |
 *  -------------------------------------
 *  !B  |       +       |       **      |
 *  -------------------------------------
 *
 * Conditions:
 * A:   cur_old_roots < nr_old_roots    (not exclusive before)
 * !A:  cur_old_roots == nr_old_roots   (possible exclusive before)
 * B:   cur_new_roots < nr_new_roots    (not exclusive now)
 * !B:  cur_new_roots == nr_new_roots   (possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive     -: Possible exclusive -> sharing
 * *: Definitely not changed.           **: Possible unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * them only one variant may be 0.
 *
 * Lastly, check result **; since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things; the code and logic
 * are easy to understand now.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
                                  struct ulist *qgroups,
                                  u64 nr_old_roots,
                                  u64 nr_new_roots,
                                  u64 num_bytes, u64 seq)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup *qg;
        u64 cur_new_count, cur_old_count;

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(qgroups, &uiter))) {
                bool dirty = false;

                qg = unode_aux_to_qgroup(unode);
                cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
                cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

                trace_qgroup_update_counters(fs_info, qg, cur_old_count,
                                             cur_new_count);

                /* Rfer update part */
                if (cur_old_count == 0 && cur_new_count > 0) {
                        qg->rfer += num_bytes;
                        qg->rfer_cmpr += num_bytes;
                        dirty = true;
                }
                if (cur_old_count > 0 && cur_new_count == 0) {
                        qg->rfer -= num_bytes;
                        qg->rfer_cmpr -= num_bytes;
                        dirty = true;
                }

                /* Excl update part */
                /* Exclusive/none -> shared case */
                if (cur_old_count == nr_old_roots &&
                    cur_new_count < nr_new_roots) {
                        /* Exclusive -> shared */
                        if (cur_old_count != 0) {
                                qg->excl -= num_bytes;
                                qg->excl_cmpr -= num_bytes;
                                dirty = true;
                        }
                }

                /* Shared -> exclusive/none case */
                if (cur_old_count < nr_old_roots &&
                    cur_new_count == nr_new_roots) {
                        /* Shared->exclusive */
                        if (cur_new_count != 0) {
                                qg->excl += num_bytes;
                                qg->excl_cmpr += num_bytes;
                                dirty = true;
                        }
                }

                /* Exclusive/none -> exclusive/none case */
                if (cur_old_count == nr_old_roots &&
                    cur_new_count == nr_new_roots) {
                        if (cur_old_count == 0) {
                                /* None -> exclusive/none */

                                if (cur_new_count != 0) {
                                        /* None -> exclusive */
                                        qg->excl += num_bytes;
                                        qg->excl_cmpr += num_bytes;
                                        dirty = true;
                                }
                                /* None -> none, nothing changed */
                        } else {
                                /* Exclusive -> exclusive/none */

                                if (cur_new_count == 0) {
                                        /* Exclusive -> none */
                                        qg->excl -= num_bytes;
                                        qg->excl_cmpr -= num_bytes;
                                        dirty = true;
                                }
                                /* Exclusive -> exclusive, nothing changed */
                        }
                }

                if (dirty)
                        qgroup_dirty(fs_info, qg);
        }
        return 0;
}
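
/*
 * Worked example (illustrative, not from the original file): a 1MiB extent
 * shared by subvolumes 257 and 258 loses its last reference from 258, so
 * nr_old_roots = 2 and nr_new_roots = 1.
 *
 *   0/257: cur_old = 1, cur_new = 1.  rfer unchanged; since
 *          cur_old < nr_old_roots and cur_new == nr_new_roots != 0,
 *          this is shared -> exclusive: excl += 1MiB.
 *   0/258: cur_old = 1, cur_new = 0.  rfer -= 1MiB; no excl branch
 *          matches (it never held the extent exclusively), so excl
 *          stays untouched.
 */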

/*
 * Check if the @roots potentially is a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
 * one as well)
 */
static int maybe_fs_roots(struct ulist *roots)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;

        /* Empty one, still possible for fs roots */
        if (!roots || roots->nnodes == 0)
                return 1;

        ULIST_ITER_INIT(&uiter);
        unode = ulist_next(roots, &uiter);
        if (!unode)
                return 1;

        /*
         * If it contains fs tree roots, then it must belong to fs/subvol
         * trees.
         * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
         */
        return is_fstree(unode->val);
}
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					  num_bytes, nr_old_roots,
					  nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
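/*
 * Note on the seq arithmetic above: qgroup_update_refcnt() first lifts a
 * qgroup's refcnt to the current seq, then adds one per root that
 * reaches it, so "refcnt - seq" is the per-round root count read by
 * qgroup_update_counters().  Bumping qgroup_seq by
 * max(nr_old_roots, nr_new_roots) + 1 guarantees the next round starts
 * beyond every refcnt written in this one.
 */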
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use SEQ_LAST as time_seq to do special search, which
			 * doesn't lock tree or delayed_refs and searches the
			 * current root.  It's safe inside commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
				record->bytenr, SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans,
							  record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);
	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}
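/*
 * The records drained above are queued by btrfs_qgroup_trace_extent()
 * as delayed refs touch extents; by commit time each record already
 * carries the pre-transaction root set (old_roots), and the
 * post-transaction set is resolved here on the spot.
 */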
/*
 * Called from commit_transaction.  Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;

	if (!quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}
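/*
 * Note that qgroup_lock is dropped around the update_qgroup_*_item()
 * calls above: they modify the quota tree and may block, while the
 * dirty list may only be walked under the spinlock.  list_del_init()
 * keeps a concurrent re-dirty harmless, it simply queues the qgroup
 * again.
 */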
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make
		 * sure our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
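/*
 * Layout reminder for the variable-length part walked above: the u64
 * array right behind struct btrfs_qgroup_inherit holds num_qgroups ids
 * to relate the new subvolume to, followed by num_ref_copies and then
 * num_excl_copies (src, dst) qgroupid pairs, e.g. with num_qgroups = 2
 * and num_ref_copies = 1: { 1/100, 1/101, srcid, dstid }.
 */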
/*
 * Two limits to commit transaction in advance.
 *
 * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
 * For SIZE, it will be in byte unit as threshold.
 */
#define QGROUP_FREE_RATIO		32
#define QGROUP_FREE_SIZE		SZ_32M
static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
				const struct btrfs_qgroup *qg, u64 num_bytes)
{
	u64 free;
	u64 threshold;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	/*
	 * Even if we passed the check, it's better to check if reservation
	 * for meta_pertrans is pushing us near limit.
	 * If there is too much pertrans reservation or it's near the limit,
	 * let's try commit transaction to free some, using transaction_kthread
	 */
	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
			free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
			threshold = min_t(u64,
					  qg->max_excl / QGROUP_FREE_RATIO,
					  QGROUP_FREE_SIZE);
		} else {
			free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
			threshold = min_t(u64,
					  qg->max_rfer / QGROUP_FREE_RATIO,
					  QGROUP_FREE_SIZE);
		}

		/*
		 * Use transaction_kthread to commit transaction, so we no
		 * longer need to bother nested transaction nor lock context.
		 */
		if (free < threshold)
			btrfs_commit_transaction_locksafe(fs_info);
	}

	return true;
}
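/*
 * Threshold arithmetic example (illustrative): with max_excl = 1GiB the
 * threshold is min(1GiB / 32, SZ_32M) = 32MiB, so an early commit is
 * kicked once less than 32MiB of headroom remains; for a small 64MiB
 * limit the ratio term wins and the threshold drops to 2MiB.
 */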
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
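/*
 * The walk above is check-then-apply on purpose: pass one visits the
 * qgroup and every ancestor collected through its glist and may fail
 * with -EDQUOT without side effects; only when all of them have
 * headroom does pass two add num_bytes, so no partial rollback is ever
 * needed.  Both passes run under qgroup_lock, hence GFP_ATOMIC.
 */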
/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroup too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1) {
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];
	}

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
/*
 * Check if the leaf is the last leaf, which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}
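/*
 * The progress pointer maintained above is what lets rescan and live
 * accounting compose: btrfs_qgroup_account_extent() skips any extent at
 * or beyond qgroup_rescan_progress.objectid, since this scan will reach
 * it later, while extents already behind the pointer are accounted
 * normally.
 */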
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
	}

	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info,
				  "fail to update qgroup status: %d", err);
		}
		btrfs_end_transaction(trans);
	}

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = false;
	mutex_unlock(&fs_info->qgroup_rescan_lock);
	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	fs_info->qgroup_rescan_running = true;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_rescan_running;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}
/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do
 * nothing if the range is already reserved.
 *
 * Return 0 for successful reserve
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: this function may sleep for memory allocation.
 *       if btrfs_qgroup_reserve_data() is called multiple times with
 *       same @reserved, caller must ensure when error happens it's OK
 *       to free *ALL* reserved space.
 */
int btrfs_qgroup_reserve_data(struct inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset *reserved;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto cleanup;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	/* cleanup *ALL* already reserved ranges */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0,
				 NULL);
	extent_changeset_release(reserved);
	return ret;
}
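/*
 * Typical call chain, for reference: buffered and direct writes reserve
 * data space through btrfs_check_data_free_space(), which ends up here;
 * after a successful write the range is released via
 * btrfs_qgroup_release_data() once the file extent is recorded, or
 * freed via btrfs_qgroup_free_data() on error or page invalidation.
 */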
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: To also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So no need to rush.
		 */
		/*
		 * EXTENT_QGROUP_RESERVED lives in io_tree, not in
		 * io_failure_tree, so clear it there.
		 */
		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
				free_start, free_start + free_len - 1,
				EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
				  freed, BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
static int __btrfs_qgroup_release_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
		      &BTRFS_I(inode)->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
				       start + len - 1, EXTENT_QGROUP_RESERVED,
				       &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->root_key.objectid,
				changeset.bytes_changed,
				BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages get invalidated before reaching disk.
 * Or for error cleanup case.
 * if @reserved is given, only reserved range in [@start, @start + @len) will
 * be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}
/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and corresponding
 * FILE_EXTENT is inserted into corresponding root.
 *
 * Since new qgroup accounting framework will only update qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further write to be
 * COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
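/*
 * Quick contrast of the two wrappers above: free (data that never made
 * it to disk) returns the reservation to the qgroups immediately, while
 * release (data that did reach disk) only clears EXTENT_QGROUP_RESERVED
 * and lets btrfs_qgroup_account_extents() settle the numbers at commit.
 */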
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}
static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
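/*
 * Clamping example (illustrative): if a prealloc reservation was made
 * before quotas were enabled, nothing was recorded in
 * qgroup_meta_rsv_prealloc; a later free of the same bytes is clamped
 * to the recorded value, so the per-qgroup rsv counters never
 * underflow.
 */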
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * To avoid quota disabled->enabled underflow.
	 * In that case, we may try to free space we haven't reserved
	 * (since quota was disabled), so record what we reserved into root.
	 * And ensure later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}
void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				   BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
			       BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}
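/*
 * Rationale for converting instead of free + reserve: when a metadata
 * operation succeeds, its PREALLOC reservation must keep covering the
 * blocks until the transaction commits.  Moving the bytes to PERTRANS
 * in one pass under qgroup_lock leaves no window where the space could
 * be handed out twice.
 */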
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
/*
 * Check qgroup reserved space leaking, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(BTRFS_I(inode)->root->fs_info,
				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
				inode->i_ino, unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->root_key.objectid,
				changeset.bytes_changed,
				BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}
/*
 * Delete all swapped blocks record of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}
/*
 * Add subtree roots record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that gets swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group_cache *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks =
						&subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation();
	 * no one else can modify tree blocks, thus the qgroup numbers will
	 * not change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicated but mismatch entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
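/*
 * Bookkeeping example (illustrative): balance swaps a subtree whose
 * subvolume copy sits at bytenr A and whose reloc copy sits at bytenr
 * B.  Since the record describes the state after the pointer swap, it
 * stores subvol_bytenr = B and reloc_bytenr = A; a later COW of B (see
 * btrfs_qgroup_trace_subtree_after_cow() below) finds the record by B
 * and replays the delayed subtree trace against A.
 */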
/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block,
				 node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
					block->last_snapshot,
					block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}