// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}
#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif
static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}
static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}
static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}
static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}
/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable().
 * The first two are in single-threaded paths. And for the third one, we have
 * set quota_root to be null with qgroup_lock held before, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations, such
	 * as adding/removing qgroups or adding/deleting qgroup relations for
	 * example, because all qgroup operations first start or join a
	 * transaction and then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas, by calling
	 * this function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to disable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request qgroup rescan worker to complete and wait for it. This wait
	 * must be done before transaction start for quota disable since it may
	 * deadlock with transaction by the qgroup rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
/*
 * The easy accounting, we're updating qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for parent, so
 * excl/rfer just get added/removed.
 *
 * So is qgroup reservation space, which should also be added/removed to
 * parent.
 * Or when child tries to release reservation space, parent will underflow its
 * reservation (for relationship adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 for need to full rescan
 * and mark INCONSISTENT flag.
 * Return < 0 for other error.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}
static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * The parent/member pair doesn't exist, then try to delete the dead
	 * relation items only.
	 */
	if (!member || !parent)
		goto delete_item;

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	/*
	 * We are always called in a context where we are already holding a
	 * transaction handle. Often we are called when adding a data delayed
	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
	 * in which case we will be holding a write lock on extent buffer from a
	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
	 * acquire fs_info->commit_root_sem, because that is a higher level lock
	 * that must be acquired before locking any extent buffers.
	 *
	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
	 * but we can't pass it a non-NULL transaction handle, because otherwise
	 * it would not use commit roots and would lock extent buffers, causing
	 * a deadlock if it ends up trying to read lock the same extent buffer
	 * that was previously write locked at btrfs_truncate_inode_items().
	 *
	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
	 * explicitly tell it to not acquire the commit_root_sem - if we are
	 * holding a transaction handle we don't need its protection.
	 */
	ASSERT(trans != NULL);

	ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
				   true);
	if (ret < 0) {
		trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(trans->fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			   ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(trans, record);
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}
/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
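
/*
 * For example, with root_level == 2 and a path whose leaf slot just ran
 * past the last item: the leaf is unlocked and freed and slots[1] is
 * bumped; if slots[1] is still within its node we stop there and the
 * caller walks back down, otherwise that node is freed too and slots[2]
 * is bumped. Only when slots[2] runs past the root's item count do we
 * return 1 to signal the walk is complete.
 */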
/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct extent_buffer *eb;
			int parent_slot;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
					dst_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}
/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /     \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO  NN
 *               (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
 * above tree blocks along with their counter parts in file tree.
 * While during search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		int parent_slot;
		u64 child_gen;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
		eb = dst_path->nodes[cur_level + 1];
		parent_slot = dst_path->slots[cur_level + 1];
		child_gen = btrfs_node_ptr_generation(eb, parent_slot);

		/* This node is old, no need to trace */
		if (child_gen < last_snapshot)
			goto out;

		eb = btrfs_read_node_slot(eb, parent_slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto out;
		}

		dst_path->nodes[cur_level] = eb;
		dst_path->slots[cur_level] = 0;

		btrfs_tree_read_lock(eb);
		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
		need_cleanup = true;
	}

	/* Now record this tree block and its counter part for qgroups */
	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
				       root_level, trace_leaf);
	if (ret < 0)
		goto cleanup;

	eb = dst_path->nodes[cur_level];

	if (cur_level > 0) {
		/* Iterate all child tree blocks */
		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			/* Skip old tree blocks as they won't be swapped */
			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
				continue;
			dst_path->slots[cur_level] = i;

			/* Recursive call (at most 7 times) */
			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
					dst_path, cur_level - 1, root_level,
					last_snapshot, trace_leaf);
			if (ret < 0)
				goto cleanup;
		}
	}

cleanup:
	if (need_cleanup) {
		/* Clean up */
		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
				     dst_path->locks[cur_level]);
		free_extent_buffer(dst_path->nodes[cur_level]);
		dst_path->nodes[cur_level] = NULL;
		dst_path->slots[cur_level] = 0;
		dst_path->locks[cur_level] = 0;
	}
out:
	return ret;
}
static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				     struct extent_buffer *src_eb,
				     struct extent_buffer *dst_eb,
				     u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	int level;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Wrong parameter order */
	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_header_generation(src_eb),
			     btrfs_header_generation(dst_eb));
		return -EUCLEAN;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EIO;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}
	/* For dst_path */
	atomic_inc(&dst_eb->refs);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation-aware search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(dst_path);
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	atomic_inc(&root_eb->refs);	/* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr from parent before we
			 * can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			path->locks[level] = BTRFS_READ_LOCK;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
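
/*
 * Illustration (hypothetical numbers): for an extent referenced by
 * subvolumes 0/256 and 0/257, both assigned to qgroup 1/0, the UPDATE_OLD
 * walk above visits 0/256 and 0/257 once each and 1/0 twice (once via each
 * member).  btrfs_qgroup_get_old_refcnt() later reports 1, 1 and 2 for
 * them, while qgroups not visited in this pass report 0.
 */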

/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possible unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case: a count of 0 means this qgroup does not reference the extent at all.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * them only one variant may be 0.
 *
 * Lastly, check result **; since there are 2 variants that may be 0, split
 * them into 3 small cases.  This time we don't need to consider other
 * things, so the code and logic are easy to understand now.
 */
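
/*
 * A concrete walk-through (illustrative numbers): take a 16KiB extent that
 * was referenced only by subvolume 0/256 (nr_old_roots == 1) and is now
 * shared with a new snapshot 0/257 (nr_new_roots == 2).  For 0/256,
 * cur_old_count == 1 == nr_old_roots and cur_new_count == 1 < nr_new_roots,
 * so the extent goes exclusive -> shared: excl drops by 16KiB while rfer is
 * unchanged.  For 0/257, cur_old_count == 0 and cur_new_count == 1, so rfer
 * grows by 16KiB but excl does not.
 */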

static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups, u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}

/*
 * Check if the @roots potentially is a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (an empty list
 * counts as possible as well)
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 1;

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
	 */
	return is_fstree(unode->val);
}
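
/*
 * E.g. a ulist of { 5 (FS_TREE), 256, 257 } passes this check, while one
 * holding the extent tree objectid would not, since is_fstree() only
 * accepts the fs tree and subvolume tree ids.
 */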

int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled meanwhile, the resources need to be freed and
	 * we can't just exit here.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					  num_bytes, nr_old_roots,
					  nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
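
/*
 * Why the bump is max(nr_old_roots, nr_new_roots) + 1: within one pass a
 * qgroup's refcnt is raised at most once per root, so it can reach at most
 * seq + max(nr_old_roots, nr_new_roots).  Moving qgroup_seq past that
 * guarantees the refcnts left behind read back as 0 for the next extent's
 * accounting pass.
 */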

int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
			 * which doesn't lock tree or delayed_refs and search
			 * current root. It's safe inside commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
			   record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	if (!fs_info->quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}

/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	bool committing = false;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	bool need_rescan = false;
	u32 level_size = 0;
	u64 nums;

	/*
	 * There are only two callers of this function.
	 *
	 * One in create_subvol() in the ioctl context, which needs to hold
	 * the qgroup_ioctl_lock.
	 *
	 * The other one in create_pending_snapshot() where no other qgroup
	 * code can modify the fs as they all need to either start a new trans
	 * or hold a trans handler, thus we don't need to hold
	 * qgroup_ioctl_lock.
	 * This would avoid long and complex lock chains and make lockdep happy.
	 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;

		/*
		 * If we're doing a snapshot, and adding the snapshot to a new
		 * qgroup, the numbers are guaranteed to be incorrect.
		 */
		if (srcid)
			need_rescan = true;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;

		/* Manually tweaking numbers certainly needs a rescan */
		need_rescan = true;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		need_rescan = true;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	if (!ret)
		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
out:
	if (!committing)
		mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (need_rescan)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
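
/*
 * Layout note: the qgroupids consumed above follow struct
 * btrfs_qgroup_inherit directly in memory (hence the (u64 *)(inherit + 1)
 * cast): first inherit->num_qgroups ids of groups the new qgroup joins,
 * then inherit->num_ref_copies (src, dst) pairs, then
 * inherit->num_excl_copies (src, dst) pairs, matching the three loops above.
 */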

static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}
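
/*
 * For example (illustrative numbers): with max_rfer == 1MiB, rfer == 768KiB
 * and 128KiB already reserved, a request for another 256KiB fails the first
 * check, since 128K + 768K + 256K > 1M.
 */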

static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
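
/*
 * Note the two-pass shape above: the first walk only checks limits while
 * collecting the qgroup and all its ancestors into qgroup_ulist, the second
 * walk applies the reservation.  Both happen under qgroup_lock, so the
 * reservation is all-or-nothing: either every qgroup in the hierarchy gets
 * it or none does.
 */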

/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroups too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}

/*
 * Check if the leaf is the last leaf, i.e. all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}

/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}

static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
	return btrfs_fs_closing(fs_info) ||
		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;
	bool stopped = false;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !(stopped = rescan_should_stop(fs_info))) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!stopped)
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "fail to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (stopped) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		} else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			/* Quota disable is in progress */
			ret = -EBUSY;
		}

		if (ret) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = true;
	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	running = fs_info->qgroup_rescan_running;
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		mutex_lock(&fs_info->qgroup_rescan_lock);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
	}
}

#define rbtree_iterate_from_safe(node, next, start)				\
	for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
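
/*
 * The iterator above is deletion-safe: the successor is saved in @next
 * before the loop body runs, so the body may erase or free @node (as
 * qgroup_unreserve_range() below does via ulist_del()) without breaking
 * the walk.
 */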

static int qgroup_unreserve_range(struct btrfs_inode *inode,
				  struct extent_changeset *reserved, u64 start,
				  u64 len)
{
	struct rb_node *node;
	struct rb_node *next;
	struct ulist_node *entry;
	int ret = 0;

	node = reserved->range_changed.root.rb_node;
	if (!node)
		return 0;
	while (node) {
		entry = rb_entry(node, struct ulist_node, rb_node);
		if (entry->val < start)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

	if (entry->val > start && rb_prev(&entry->rb_node))
		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
				 rb_node);

	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
		u64 entry_start;
		u64 entry_end;
		u64 entry_len;
		int clear_ret;

		entry = rb_entry(node, struct ulist_node, rb_node);
		entry_start = entry->val;
		entry_end = entry->aux;
		entry_len = entry_end - entry_start + 1;

		if (entry_start >= start + len)
			break;
		if (entry_start + entry_len <= start)
			continue;
		/*
		 * Now the entry is in [start, start + len), revert the
		 * EXTENT_QGROUP_RESERVED bit.
		 */
		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
					      entry_end, EXTENT_QGROUP_RESERVED);
		if (!ret && clear_ret < 0)
			ret = clear_ret;

		ulist_del(&reserved->range_changed, entry->val, entry->aux);
		if (likely(reserved->bytes_changed >= entry_len)) {
			reserved->bytes_changed -= entry_len;
		} else {
			WARN_ON(1);
			reserved->bytes_changed = 0;
		}
	}

	return ret;
}

/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at
 *   run_delalloc_range().  In theory, we should only flush nodatacow
 *   inodes, but it's not yet possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by a later commit
 *   transaction.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but any more qgroup space
 *   is better than no qgroup space at all.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* Can't hold an open transaction or we run the risk of deadlocking. */
	ASSERT(current->journal_info == NULL);
	if (WARN_ON(current->journal_info))
		return 0;

	/*
	 * We don't want to run flush again and again, so if there is a running
	 * one, we won't try to start a new flush, but exit directly.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		wait_event(root->qgroup_flush_wait,
			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root, true);
	if (ret < 0)
		goto out;
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}

static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = inode->root;
	struct extent_changeset *reserved;
	bool new_reserved = false;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		new_reserved = true;
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&inode->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto out;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	qgroup_unreserve_range(inode, reserved, start, len);
out:
	if (new_reserved) {
		extent_changeset_free(reserved);
		*reserved_ret = NULL;
	}
	return ret;
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing and
 *	 commit transaction.  So the caller should not hold any dirty page
 *	 locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	int ret;

	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(inode->root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_data(inode, reserved_ret, start, len);
}
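
/*
 * A typical call site looks roughly like this (sketch only; do_write() is a
 * hypothetical stand-in for the caller's real work):
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_write(inode, start, len);
 *	if (ret < 0)
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *	extent_changeset_free(reserved);
 */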

/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: To also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So there is no need to rush.
		 */
		ret = clear_record_extent_bits(&inode->io_tree, free_start,
					       free_start + free_len - 1,
					       EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
				  BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}

static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
				       EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}

/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages get invalidated before reaching disk.
 * Or for error cleanup case.
 * if @reserved is given, only reserved range in [@start, @start + @len) will
 * be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and corresponding
 * FILE_EXTENT is inserted into corresponding root.
 *
 * Since new qgroup accounting framework will only update qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further write to be
 * COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}

static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
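
/*
 * The min_t() clamp above is what keeps a later free from underflowing the
 * per-root counters after quota was toggled: e.g. if only 16KiB of prealloc
 * rsv is on record and a caller asks to subtract 32KiB, only 16KiB is
 * returned (and thus freed from the qgroups).  Numbers are illustrative.
 */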

int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * To avoid quota disabled->enabled underflow.
	 * In that case, we may try to free space we haven't reserved
	 * (since quota was disabled), so record what we reserved into root.
	 * And ensure later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	int ret;

	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;
	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}

static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!fs_info->quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
		       qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}
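
/*
 * Reservation lifecycle in short: metadata space is reserved as
 * META_PREALLOC before a change is made; once the change has been pushed
 * into the current transaction, the reservation is converted here to
 * META_PERTRANS, which is then dropped in bulk by
 * btrfs_qgroup_free_meta_all_pertrans() when the transaction commits.
 */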

void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}

/*
 * Check qgroup reserved space leaking, normally at inode destroy time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);

	}
	extent_changeset_release(&changeset);
}

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}

/*
 * Add subtree root records into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that got swapped
 * @bg:			block group under balance
 * @subvol_parent/slot: pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in the reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
	 * no one else can modify tree blocks, thus the qgroup numbers will not
	 * change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicate but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
4220 * delayed subtree trace for qgroup.
4222 * This is called during btrfs_cow_block().
4224 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle
*trans
,
4225 struct btrfs_root
*root
,
4226 struct extent_buffer
*subvol_eb
)
4228 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4229 struct btrfs_qgroup_swapped_blocks
*blocks
= &root
->swapped_blocks
;
4230 struct btrfs_qgroup_swapped_block
*block
;
4231 struct extent_buffer
*reloc_eb
= NULL
;
4232 struct rb_node
*node
;
4234 bool swapped
= false;
4235 int level
= btrfs_header_level(subvol_eb
);
4239 if (!test_bit(BTRFS_FS_QUOTA_ENABLED
, &fs_info
->flags
))
4241 if (!is_fstree(root
->root_key
.objectid
) || !root
->reloc_root
)
4244 spin_lock(&blocks
->lock
);
4245 if (!blocks
->swapped
) {
4246 spin_unlock(&blocks
->lock
);
4249 node
= blocks
->blocks
[level
].rb_node
;
4252 block
= rb_entry(node
, struct btrfs_qgroup_swapped_block
, node
);
4253 if (block
->subvol_bytenr
< subvol_eb
->start
) {
4254 node
= node
->rb_left
;
4255 } else if (block
->subvol_bytenr
> subvol_eb
->start
) {
4256 node
= node
->rb_right
;
4263 spin_unlock(&blocks
->lock
);
4266 /* Found one, remove it from @blocks first and update blocks->swapped */
4267 rb_erase(&block
->node
, &blocks
->blocks
[level
]);
4268 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
4269 if (RB_EMPTY_ROOT(&blocks
->blocks
[i
])) {
4274 blocks
->swapped
= swapped
;
4275 spin_unlock(&blocks
->lock
);
4277 /* Read out reloc subtree root */
4278 reloc_eb
= read_tree_block(fs_info
, block
->reloc_bytenr
, 0,
4279 block
->reloc_generation
, block
->level
,
4281 if (IS_ERR(reloc_eb
)) {
4282 ret
= PTR_ERR(reloc_eb
);
4286 if (!extent_buffer_uptodate(reloc_eb
)) {
4291 ret
= qgroup_trace_subtree_swap(trans
, reloc_eb
, subvol_eb
,
4292 block
->last_snapshot
, block
->trace_leaf
);
4295 free_extent_buffer(reloc_eb
);
4298 btrfs_err_rl(fs_info
,
4299 "failed to account subtree at bytenr %llu: %d",
4300 subvol_eb
->start
, ret
);
4301 fs_info
->qgroup_flags
|= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT
;

void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);