// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

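/*
 * Note on the seq based refcnts above: a refcnt smaller than @seq means the
 * qgroup has not been touched in the current accounting round, so the
 * getters return 0.  E.g. with seq == 100, two updates with mod == 1 leave
 * old_refcnt at 102 and btrfs_qgroup_get_old_refcnt() returns 2.
 */
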
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

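/*
 * A single btrfs_qgroup_list allocation represents one member <-> group
 * edge: next_group links it into member->groups and next_member links it
 * into group->members (see add_relation_rb() below).
 */
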
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

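/*
 * The ulist aux field is a u64, wide enough for a kernel pointer, so the
 * helpers above stash the qgroup pointer there and recover it while walking
 * a ulist without doing another rbtree lookup.
 */
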
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

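/*
 * Note: the allocation above uses GFP_ATOMIC because callers hold the
 * fs_info->qgroup_lock spinlock and must not sleep.
 */
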
static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

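/*
 * The two passes above are ordered on purpose: a relation can only be
 * linked once both endpoint qgroups exist in the rbtree, so all qgroup
 * items are loaded first and orphan relations are then tolerated with just
 * a warning.
 */
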
/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

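/*
 * The info and limit items of a qgroup share the same key layout
 * (objectid 0, key type, offset == qgroupid), so the deletion above is just
 * two searches that differ only in key.type.
 */
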
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

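/*
 * Qgroups on the dirty list have in-memory counters ahead of their on-disk
 * items; the list is drained at transaction commit time (see
 * btrfs_run_qgroups()), when the qgroup info items get rewritten.
 */
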
/*
 * The easy accounting, we're updating qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for parent, so
 * excl/rfer just get added/removed.
 *
 * So is qgroup reservation space, which should also be added/removed to
 * parent.
 * Or when child tries to release reservation space, parent will underflow its
 * reservation (for relationship adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */

static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

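/*
 * The tmp ulist above doubles as a visited set: a parent reachable through
 * several relation edges is added, and thus adjusted, only once, which
 * prevents double accounting in diamond shaped qgroup hierarchies.
 */
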
/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 for a needed full rescan
 * and mark INCONSISTENT flag.
 * Return < 0 for other error.
 */

static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

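/*
 * The excl == rfer test above is what makes this the quick path: when every
 * byte the child refers to is also exclusive to it, the parent's counters
 * can be fixed up directly and no full rescan is needed.
 */
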
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	int ret = 0;
	int ret2;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * The parent/member pair doesn't exist, then try to delete the dead
	 * relation items only.
	 */
	if (!member || !parent)
		goto delete_item;

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

*trans
, u64 src
,
1427 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
1430 mutex_lock(&fs_info
->qgroup_ioctl_lock
);
1431 ret
= __del_qgroup_relation(trans
, src
, dst
);
1432 mutex_unlock(&fs_info
->qgroup_ioctl_lock
);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}

int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

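/*
 * Tracing is split in two phases on purpose: inserting the record only
 * needs the delayed_refs spinlock, while the backref walk resolving
 * old_roots in btrfs_qgroup_trace_extent_post() can block, so it runs after
 * the lock is dropped.
 */
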
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */

static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */

static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct btrfs_key first_key;
			struct extent_buffer *eb;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     cur_level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
			dst_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /    \
 * L1    OO        NN (b)
 *      /  \      /  \
 *    OO  OO    OO  NN
 *                  (c)
 *
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * above tree blocks along with their counter parts in file tree.
 * While during search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */

static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		struct btrfs_key first_key;
		int parent_slot;
		u64 child_gen;
		u64 child_bytenr;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
		eb = dst_path->nodes[cur_level + 1];
		parent_slot = dst_path->slots[cur_level + 1];
		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

		/* This node is old, no need to trace */
		if (child_gen < last_snapshot)
			goto out;

		eb = read_tree_block(fs_info, child_bytenr, child_gen,
				     cur_level, &first_key);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto out;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			ret = -EIO;
			goto out;
		}

		dst_path->nodes[cur_level] = eb;
		dst_path->slots[cur_level] = 0;

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_read(eb);
		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		need_cleanup = true;
	}

	/* Now record this tree block and its counter part for qgroups */
	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
				       root_level, trace_leaf);
	if (ret < 0)
		goto cleanup;

	eb = dst_path->nodes[cur_level];

	if (cur_level > 0) {
		/* Iterate all child tree blocks */
		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			/* Skip old tree blocks as they won't be swapped */
			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
				continue;
			dst_path->slots[cur_level] = i;

			/* Recursive call (at most 7 times) */
			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
					dst_path, cur_level - 1, root_level,
					last_snapshot, trace_leaf);
			if (ret < 0)
				goto cleanup;
		}
	}

cleanup:
	if (need_cleanup) {
		/* Clean up */
		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
				     dst_path->locks[cur_level]);
		free_extent_buffer(dst_path->nodes[cur_level]);
		dst_path->nodes[cur_level] = NULL;
		dst_path->slots[cur_level] = 0;
		dst_path->locks[cur_level] = 0;
	}
out:
	return ret;
}

static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct extent_buffer *src_eb,
				struct extent_buffer *dst_eb,
				u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	int level;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Wrong parameter order */
	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_header_generation(src_eb),
			     btrfs_header_generation(dst_eb));
		return -EUCLEAN;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EIO;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}
	/* For dst_path */
	atomic_inc(&dst_eb->refs);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation-aware depth-first search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(dst_path);
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	atomic_inc(&root_eb->refs);	/* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			struct btrfs_key first_key;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr/gen from parent before
			 * we can read the block.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */

static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, codes can explain themselves.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possible unchanged.
 *
 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
 *
 * To make the logic clear, we first use condition A and B to split
 * combination into 4 results.
 *
 * Then, for result "+" and "-", check the old/new_roots == 0 case, as in them
 * only one variant may be 0.
 *
 * Lastly, check result **, since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things, the codes and logic
 * are easy to understand now.
 */

static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}

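/*
 * Worked example: an extent of num_bytes referenced only by subvolume A is
 * cloned into subvolume B.  For A's qgroup, cur_old_count == nr_old_roots
 * (both 1) while cur_new_count (1) < nr_new_roots (2), so the extent goes
 * from exclusive to shared: excl drops by num_bytes, rfer is unchanged.
 * B's qgroup hits the rfer update part (cur_old_count == 0,
 * cur_new_count > 0) and gains num_bytes of rfer but no excl.
 */
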
/*
 * Check if @roots is potentially a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
 *          one is valid)
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 0;

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol
	 * trees.
	 */
	return is_fstree(unode->val);
}
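/*
 * For example, a ulist holding subvolume id 257 may be a set of fs tree
 * roots and yields 1 above, while one whose first entry is the extent tree
 * objectid yields 0, since extent tree blocks are never shared with
 * fs/subvol trees.
 */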
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled meanwhile, the resources need to be freed and
	 * we can't just exit here.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					num_bytes, nr_old_roots, nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
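/*
 * Note on ownership: btrfs_qgroup_account_extent() above always frees
 * @old_roots and @new_roots, even on the early-exit paths, so callers hand
 * over the ulists and must not touch them afterwards.
 */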
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use SEQ_LAST as time_seq to do special search, which
			 * doesn't lock tree or delayed_refs and search current
			 * root. It's safe inside commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
				record->bytenr, SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	if (!fs_info->quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	bool committing = false;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	bool need_rescan = false;
	u32 level_size = 0;
	u64 nums;

	/*
	 * There are only two callers of this function.
	 *
	 * One in create_subvol() in the ioctl context, which needs to hold
	 * the qgroup_ioctl_lock.
	 *
	 * The other one in create_pending_snapshot() where no other qgroup
	 * code can modify the fs as they all need to either start a new trans
	 * or hold a trans handle, thus we don't need to hold
	 * qgroup_ioctl_lock.
	 * This would avoid a long and complex lock chain and make lockdep
	 * happy.
	 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}


	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make
		 * sure our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;

		/*
		 * If we're doing a snapshot, and adding the snapshot to a new
		 * qgroup, the numbers are guaranteed to be incorrect.
		 */
		if (srcid)
			need_rescan = true;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;

		/* Manually tweaking numbers certainly needs a rescan */
		need_rescan = true;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		need_rescan = true;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	if (!ret)
		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
out:
	if (!committing)
		mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (need_rescan)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}
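/*
 * Example of the check above: with max_rfer = 1 MiB, rfer = 768 KiB and
 * 128 KiB already reserved, asking for another 256 KiB fails, since
 * 768K + 128K + 256K = 1152K exceeds the 1 MiB limit.
 */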
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
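/*
 * The two-pass structure above is deliberate: limits are checked on the
 * whole qgroup hierarchy first, and the reservation is recorded only once
 * no qgroup in the chain can fail, so a partial reservation never has to
 * be rolled back under the spinlock.
 */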
/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroups too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1) {
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];
	}

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
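/*
 * Example: btrfs_qgroup_free_refroot(fs_info, subvol_id, (u64)-1,
 * BTRFS_QGROUP_RSV_META_PERTRANS) reads the pertrans bytes currently
 * recorded in the level 0 qgroup of @subvol_id and releases exactly that
 * amount from it and from every ancestor qgroup.
 */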
/*
 * Check if the leaf is the last leaf, which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}
/*
 * returns < 0 on error, 0 when more leafs are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "fail to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = true;
	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	running = fs_info->qgroup_rescan_running;
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		mutex_lock(&fs_info->qgroup_rescan_lock);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
	}
}
#define rbtree_iterate_from_safe(node, next, start)				\
	for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
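/*
 * A minimal usage sketch of the macro above: @next is sampled before the
 * loop body runs, so the body may rb_erase() and free @node without
 * breaking the iteration, e.g.:
 *
 *	rbtree_iterate_from_safe(node, next, rb_first(root)) {
 *		entry = rb_entry(node, struct foo, rb_node);
 *		rb_erase(node, root);
 *		kfree(entry);
 *	}
 *
 * (struct foo is a placeholder, not a type from this file.)
 */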
static int qgroup_unreserve_range(struct btrfs_inode *inode,
				  struct extent_changeset *reserved, u64 start,
				  u64 len)
{
	struct rb_node *node;
	struct rb_node *next;
	struct ulist_node *entry = NULL;
	int ret = 0;

	node = reserved->range_changed.root.rb_node;
	while (node) {
		entry = rb_entry(node, struct ulist_node, rb_node);
		if (entry->val < start)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

	/* Empty changeset */
	if (!entry)
		return 0;

	if (entry->val > start && rb_prev(&entry->rb_node))
		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
				 rb_node);

	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
		u64 entry_start;
		u64 entry_end;
		u64 entry_len;
		int clear_ret;

		entry = rb_entry(node, struct ulist_node, rb_node);
		entry_start = entry->val;
		entry_end = entry->aux;
		entry_len = entry_end - entry_start + 1;

		if (entry_start >= start + len)
			break;
		if (entry_start + entry_len <= start)
			continue;
		/*
		 * Now the entry is in [start, start + len), revert the
		 * EXTENT_QGROUP_RESERVED bit.
		 */
		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
					      entry_end, EXTENT_QGROUP_RESERVED);
		if (!ret && clear_ret < 0)
			ret = clear_ret;

		ulist_del(&reserved->range_changed, entry->val, entry->aux);
		if (likely(reserved->bytes_changed >= entry_len)) {
			reserved->bytes_changed -= entry_len;
		} else {
			WARN_ON(1);
			reserved->bytes_changed = 0;
		}
	}

	return ret;
}
/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at
 *   run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but it's not yet
 *   possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by a later commit
 *   transaction.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but any more qgroup space
 *   is better than no qgroup space at all.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * We don't want to run flush again and again, so if there is a running
	 * one, we won't try to start a new flush, but exit directly.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		wait_event(root->qgroup_flush_wait,
			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret < 0)
		goto out;
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}
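/*
 * try_flush_qgroup() is only worth calling on -EDQUOT: both
 * btrfs_qgroup_reserve_data() and __btrfs_qgroup_reserve_meta() below
 * retry their reservation exactly once after a successful flush.
 */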
static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = inode->root;
	struct extent_changeset *reserved;
	bool new_reserved = false;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		new_reserved = true;
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&inode->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto out;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	qgroup_unreserve_range(inode, reserved, start, len);
out:
	if (new_reserved) {
		extent_changeset_release(reserved);
		kfree(reserved);
		*reserved_ret = NULL;
	}
	return ret;
}
/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing and
 * transaction commit. So the caller should not hold any dirty page locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	int ret;

	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(inode->root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_data(inode, reserved_ret, start, len);
}
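/*
 * Typical call pattern (sketch; dirty_the_pages() is a placeholder, not a
 * function from this file): a buffered write reserves before dirtying pages
 * and frees the reservation on failure:
 *
 *	struct extent_changeset *data_reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, pos, count);
 *	if (ret < 0)
 *		goto out;
 *	ret = dirty_the_pages();
 *	if (ret < 0)
 *		btrfs_qgroup_free_data(inode, data_reserved, pos, count);
 */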
/* Free ranges specified by @reserved, normally in the error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However, as long as we free qgroup reserved space according
		 * to EXTENT_QGROUP_RESERVED, we won't double free.
		 * So there is no need to rush.
		 */
		ret = clear_record_extent_bits(&inode->io_tree, free_start,
					       free_start + free_len - 1,
					       EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
				  BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
				       EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for error cleanup.
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}
/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and the
 * corresponding FILE_EXTENT is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework will only update qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further writes to be
 * COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
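/*
 * To summarize the pair above: btrfs_qgroup_free_data() is for ranges that
 * never reach disk (invalidated pages, error cleanup) and returns the bytes
 * to the qgroups, while btrfs_qgroup_release_data() is for ranges that did
 * reach disk, where the bytes stay accounted until the transaction commit
 * settles the numbers.
 */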
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}
static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
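/*
 * The clamping in sub_root_meta_rsv() is what makes the quota
 * disable/enable dance safe: e.g. if only 16K of prealloc was recorded but
 * a caller asks to free 32K, only 16K is returned and released, so the
 * per-root counter can never underflow.
 */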
static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			       enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * This avoids a quota disabled->enabled underflow.  In that case, we
	 * may try to free space we haven't reserved (since quota was
	 * disabled), so record what we reserved into root and ensure a later
	 * release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	int ret;

	ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_meta(root, num_bytes, type, enforce);
}
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}
void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!fs_info->quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
		       qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}
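/*
 * Conversion example: after qgroup_convert_meta(fs_info, ref_root, 16384),
 * every qgroup from @ref_root up through its ancestors holds 16K less
 * META_PREALLOC and 16K more META_PERTRANS reservation; the total reserved
 * space is unchanged.
 */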
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
			EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);

	}
	extent_changeset_release(&changeset);
}
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}
/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}
/*
 * Add a subtree roots record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that got swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in the reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
	 * no one else can modify tree blocks, thus qgroup numbers will not
	 * change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicated but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for this case.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
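/*
 * Note that the rb-trees above are keyed by the post-swap subvolume bytenr,
 * which is exactly what btrfs_qgroup_trace_subtree_after_cow() below looks
 * up via subvol_eb->start once the swapped block is COWed again.
 */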
/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
			block->last_snapshot, block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);
		kfree(entry);
	}
}
);