/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
        u64 qgroupid;

        /*
         * state
         */
        u64 rfer;       /* referenced */
        u64 rfer_cmpr;  /* referenced compressed */
        u64 excl;       /* exclusive */
        u64 excl_cmpr;  /* exclusive compressed */

        /*
         * limits
         */
        u64 lim_flags;  /* which limits are set */
        u64 max_rfer;
        u64 max_excl;
        u64 rsv_rfer;
        u64 rsv_excl;

        /*
         * reservation tracking
         */
        u64 reserved;

        /*
         * lists
         */
        struct list_head groups;  /* groups this group is member of */
        struct list_head members; /* groups that are members of this group */
        struct list_head dirty;   /* dirty groups */
        struct rb_node node;      /* tree of qgroups */

        /*
         * temp variables for accounting operations
         */
        u64 old_refcnt;
        u64 new_refcnt;
};
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
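
/*
 * Illustrative sketch, not part of the original file: these two casts let a
 * struct btrfs_qgroup pointer ride along as the u64 'aux' value of a ulist
 * node, which is how the accounting code below walks qgroup hierarchies:
 *
 *      ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
 *      ...
 *      qg = u64_to_ptr(unode->aux);
 *
 * The uintptr_t intermediate keeps the pointer<->u64 round trip well defined
 * on both 32-bit and 64-bit kernels.
 */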
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}
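
/*
 * Illustrative sketch, not in the original file: one btrfs_qgroup_list node
 * links a (member, parent) pair into two lists at once, so the m:n relation
 * can be walked from either side:
 *
 *      member->groups --next_group--> list <--next_member-- parent->members
 *
 * Qgroup ids encode the level in the top 16 bits, so making qgroup 0/257 a
 * member of 1/100 boils down to (hypothetical call):
 *
 *      add_relation_rb(fs_info, 257, (1ULL << 48) | 100);
 */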
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!fs_info->quota_enabled)
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                 "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
                fs_info->quota_enabled = 0;
                fs_info->pending_quota_state = 0;
        } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                   ret >= 0) {
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        }
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
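
/*
 * Illustrative sketch of the on-disk layout the two passes above assume, not
 * part of the original file.  The quota tree stores, in key order:
 *
 *      (0,      BTRFS_QGROUP_STATUS_KEY,   0)        - one status item
 *      (0,      BTRFS_QGROUP_INFO_KEY,     qgroupid) - counters per qgroup
 *      (0,      BTRFS_QGROUP_LIMIT_KEY,    qgroupid) - limits per qgroup
 *      (member, BTRFS_QGROUP_RELATION_KEY, parent)   - plus the mirrored
 *      (parent, BTRFS_QGROUP_RELATION_KEY, member)
 *
 * Pass 1 walks the objectid-0 items, pass 2 the relation items, skipping the
 * mirrored half where objectid > offset.
 */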
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are single-threaded paths.  For the third one, we have set
 * quota_root to NULL with qgroup_lock held beforehand, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * we call btrfs_free_qgroup_config() when unmounting the
         * filesystem and when disabling quota, so we set qgroup_ulist
         * to be null here to avoid double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &quota_root->state)))
                return 0;
#endif
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root, u64 qgroupid,
                                    u64 flags, u64 max_rfer, u64 max_excl,
                                    u64 rsv_rfer, u64 rsv_excl)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
                return 0;
#endif
        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * delete the leaf one by one
                 * since the whole tree is going
                 * to be deleted.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        root->fs_info->pending_quota_state = 0;
        btrfs_free_path(path);
        return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root) {
                fs_info->pending_quota_state = 1;
                goto out;
        }

        fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
                goto out_free_path;

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0)
                goto out_free_path;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret)
                                goto out_free_path;

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0)
                        goto out_free_path;
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret)
                goto out_free_path;

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                goto out_free_path;
        }
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        fs_info->pending_quota_state = 1;
        spin_unlock(&fs_info->qgroup_lock);
out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *quota_root;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret)
                goto out;

        ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
        if (ret)
                goto out;

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(trans, tree_root, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether the qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, quota_root, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, quota_root, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, quota_root, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(quota_root->fs_info, src, dst);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        int ret = 0;
        int err;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether the qgroup relation exists at all */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, quota_root, src, dst);
        err = del_qgroup_relation_item(trans, quota_root, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no relations to this qgroup */
                if (!list_empty(&qgroup->groups) ||
                    !list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(quota_root->fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }
        ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
                                       limit->flags, limit->max_rfer,
                                       limit->max_excl, limit->rsv_rfer,
                                       limit->rsv_excl);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                           qgroupid);
        }

        spin_lock(&fs_info->qgroup_lock);
        qgroup->lim_flags = limit->flags;
        qgroup->max_rfer = limit->max_rfer;
        qgroup->max_excl = limit->max_excl;
        qgroup->rsv_rfer = limit->rsv_rfer;
        qgroup->rsv_excl = limit->rsv_excl;
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
                           struct btrfs_qgroup_operation *oper2)
{
        /*
         * Ignore seq and type here, we're looking for any operation
         * at all related to this extent on that root.
         */
        if (oper1->bytenr < oper2->bytenr)
                return -1;
        if (oper1->bytenr > oper2->bytenr)
                return 1;
        if (oper1->ref_root < oper2->ref_root)
                return -1;
        if (oper1->ref_root > oper2->ref_root)
                return 1;
        return 0;
}

static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
                              struct btrfs_qgroup_operation *oper)
{
        struct rb_node *n;
        struct btrfs_qgroup_operation *cur;
        int cmp;

        spin_lock(&fs_info->qgroup_op_lock);
        n = fs_info->qgroup_op_tree.rb_node;
        while (n) {
                cur = rb_entry(n, struct btrfs_qgroup_operation, n);
                cmp = comp_oper_exist(cur, oper);
                if (cmp < 0) {
                        n = n->rb_right;
                } else if (cmp) {
                        n = n->rb_left;
                } else {
                        spin_unlock(&fs_info->qgroup_op_lock);
                        return -EEXIST;
                }
        }
        spin_unlock(&fs_info->qgroup_op_lock);
        return 0;
}
static int comp_oper(struct btrfs_qgroup_operation *oper1,
                     struct btrfs_qgroup_operation *oper2)
{
        if (oper1->bytenr < oper2->bytenr)
                return -1;
        if (oper1->bytenr > oper2->bytenr)
                return 1;
        if (oper1->seq < oper2->seq)
                return -1;
        if (oper1->seq > oper2->seq)
                return 1;
        if (oper1->ref_root < oper2->ref_root)
                return -1;
        if (oper1->ref_root > oper2->ref_root)
                return 1;
        if (oper1->type < oper2->type)
                return -1;
        if (oper1->type > oper2->type)
                return 1;
        return 0;
}
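
/*
 * Illustrative note, not in the original file: comp_oper() gives the
 * qgroup_op_tree a total order on (bytenr, seq, ref_root, type), so all
 * operations on one extent sit adjacent in the tree, sorted by the sequence
 * they were recorded with.  qgroup_account_deleted_refs() below relies on
 * exactly this when it walks rb_next() from an operation to find later
 * removals of the same bytenr.
 */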
static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
                              struct btrfs_qgroup_operation *oper)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup_operation *cur;
        int cmp;

        spin_lock(&fs_info->qgroup_op_lock);
        p = &fs_info->qgroup_op_tree.rb_node;
        while (*p) {
                parent = *p;
                cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
                cmp = comp_oper(cur, oper);
                if (cmp < 0) {
                        p = &(*p)->rb_right;
                } else if (cmp) {
                        p = &(*p)->rb_left;
                } else {
                        spin_unlock(&fs_info->qgroup_op_lock);
                        return -EEXIST;
                }
        }
        rb_link_node(&oper->n, parent, p);
        rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
        spin_unlock(&fs_info->qgroup_op_lock);
        return 0;
}
/*
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on.
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 *
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point.  If the reference root isn't a fs
 * root then we don't bother with doing anything.
 *
 * MUST BE HOLDING THE REF LOCK.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info, u64 ref_root,
                            u64 bytenr, u64 num_bytes,
                            enum btrfs_qgroup_operation_type type, int mod_seq)
{
        struct btrfs_qgroup_operation *oper;
        int ret;

        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                return 0;

        oper = kmalloc(sizeof(*oper), GFP_NOFS);
        if (!oper)
                return -ENOMEM;

        oper->ref_root = ref_root;
        oper->bytenr = bytenr;
        oper->num_bytes = num_bytes;
        oper->type = type;
        oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
        INIT_LIST_HEAD(&oper->elem.list);
        oper->elem.seq = 0;

        if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
                /*
                 * If any operation for this bytenr/ref_root combo
                 * exists, then we know it's not exclusively owned and
                 * shouldn't be queued up.
                 *
                 * This also catches the case where we have a cloned
                 * extent that gets queued up multiple times during
                 * drop snapshot.
                 */
                if (qgroup_oper_exists(fs_info, oper)) {
                        kfree(oper);
                        return 0;
                }
        }

        ret = insert_qgroup_oper(fs_info, oper);
        if (ret) {
                /* Shouldn't happen so have an assert for developers */
                ASSERT(0);
                kfree(oper);
                return ret;
        }
        list_add_tail(&oper->list, &trans->qgroup_ref_list);

        if (mod_seq)
                btrfs_get_tree_mod_seq(fs_info, &oper->elem);

        return 0;
}
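
/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * just added a shared data ref for root 258 would record it roughly as
 *
 *      btrfs_qgroup_record_ref(trans, fs_info, 258, bytenr, num_bytes,
 *                              BTRFS_QGROUP_OPER_ADD_SHARED, 1);
 *
 * with mod_seq = 1 so a tree mod log sequence is taken and a later
 * btrfs_find_all_roots() can still see the pre-operation state.
 */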
/*
 * The easy accounting, if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                  struct btrfs_qgroup_operation *oper)
{
        struct btrfs_qgroup *qgroup;
        struct ulist *tmp;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int sign = 0;
        int ret = 0;

        tmp = ulist_alloc(GFP_NOFS);
        if (!tmp)
                return -ENOMEM;

        spin_lock(&fs_info->qgroup_lock);
        if (!fs_info->quota_root)
                goto out;
        qgroup = find_qgroup_rb(fs_info, oper->ref_root);
        if (!qgroup)
                goto out;
        switch (oper->type) {
        case BTRFS_QGROUP_OPER_ADD_EXCL:
                sign = 1;
                break;
        case BTRFS_QGROUP_OPER_SUB_EXCL:
                sign = -1;
                break;
        default:
                ASSERT(0);
        }
        qgroup->rfer += sign * oper->num_bytes;
        qgroup->rfer_cmpr += sign * oper->num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
        qgroup->excl += sign * oper->num_bytes;
        qgroup->excl_cmpr += sign * oper->num_bytes;

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                ptr_to_u64(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = u64_to_ptr(unode->aux);
                qgroup->rfer += sign * oper->num_bytes;
                qgroup->rfer_cmpr += sign * oper->num_bytes;
                qgroup->excl += sign * oper->num_bytes;
                if (sign < 0)
                        WARN_ON(qgroup->excl < oper->num_bytes);
                qgroup->excl_cmpr += sign * oper->num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        spin_unlock(&fs_info->qgroup_lock);
        ulist_free(tmp);
        return ret;
}
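
/*
 * Illustrative walk-through, not in the original file: for a
 * BTRFS_QGROUP_OPER_ADD_EXCL of 16k against qgroup 0/258 that is a member of
 * 1/100, sign is +1, so rfer/excl of 0/258 grow by 16k, and the ulist walk
 * then adds 16k to rfer/excl of 1/100 (and of any of its parents in turn).
 * A SUB_EXCL operation runs the same code with sign = -1.
 */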
/*
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts
 * as needed.
 */
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
                                  u64 root_to_skip, struct ulist *tmp,
                                  struct ulist *roots, struct ulist *qgroups,
                                  u64 seq, int *old_roots, int rescan)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct ulist_node *tmp_unode;
        struct ulist_iterator tmp_uiter;
        struct btrfs_qgroup *qg;
        int ret = 0;

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(roots, &uiter))) {
                /* We don't count our current root here */
                if (unode->val == root_to_skip)
                        continue;
                qg = find_qgroup_rb(fs_info, unode->val);
                if (!qg)
                        continue;
                /*
                 * We could have a pending removal of this same ref so we may
                 * not have actually found our ref root when doing
                 * btrfs_find_all_roots, so we need to keep track of how many
                 * old roots we find in case we removed ours and added a
                 * different one at the same time.  I don't think this could
                 * happen in practice but that sort of thinking leads to pain
                 * and suffering and to the dark side.
                 */
                (*old_roots)++;

                ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
                                GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ULIST_ITER_INIT(&tmp_uiter);
                while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
                        struct btrfs_qgroup_list *glist;

                        qg = u64_to_ptr(tmp_unode->aux);
                        /*
                         * We use this sequence number to keep from having to
                         * run the whole list and 0 out the refcnt every time.
                         * We basically use sequence as the known 0 count and
                         * then add 1 every time we see a qgroup.  This is how
                         * we get how many of the roots actually point up to
                         * the upper level qgroups in order to determine
                         * exclusive counts.
                         *
                         * For rescan we want to set old_refcnt to seq so our
                         * exclusive calculations end up correct.
                         */
                        if (rescan)
                                qg->old_refcnt = seq;
                        else if (qg->old_refcnt < seq)
                                qg->old_refcnt = seq + 1;
                        else
                                qg->old_refcnt++;

                        if (qg->new_refcnt < seq)
                                qg->new_refcnt = seq + 1;
                        else
                                qg->new_refcnt++;
                        list_for_each_entry(glist, &qg->groups, next_group) {
                                ret = ulist_add(qgroups, glist->group->qgroupid,
                                                ptr_to_u64(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                                ret = ulist_add(tmp, glist->group->qgroupid,
                                                ptr_to_u64(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                        }
                }
        }
        return 0;
}
/*
 * We need to walk forward in our operation tree and account for any roots that
 * were deleted after we made this operation.
 */
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
                                       struct btrfs_qgroup_operation *oper,
                                       struct ulist *tmp,
                                       struct ulist *qgroups, u64 seq,
                                       int *old_roots)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup *qg;
        struct btrfs_qgroup_operation *tmp_oper;
        struct rb_node *n;
        int ret;

        ulist_reinit(tmp);

        /*
         * We only walk forward in the tree since we're only interested in
         * removals that happened _after_ our operation.
         */
        spin_lock(&fs_info->qgroup_op_lock);
        n = rb_next(&oper->n);
        spin_unlock(&fs_info->qgroup_op_lock);
        if (!n)
                return 0;
        tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
        while (tmp_oper->bytenr == oper->bytenr) {
                /*
                 * If it's not a removal we don't care, additions work out
                 * properly with our refcnt tracking.
                 */
                if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
                    tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
                        goto next;
                qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
                if (!qg)
                        goto next;
                ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
                                GFP_ATOMIC);
                if (ret) {
                        if (ret < 0)
                                return ret;
                        /*
                         * We only want to increase old_roots if this qgroup is
                         * not already in the list of qgroups.  If it is already
                         * there then that means it must have been re-added or
                         * the delete will be discarded because we had an
                         * existing ref that we haven't looked up yet.  In this
                         * case we don't want to increase old_roots.  So if ret
                         * == 1 then we know that this is the first time we've
                         * seen this qgroup and we can bump the old_roots.
                         */
                        (*old_roots)++;
                        ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
                                        GFP_ATOMIC);
                        if (ret < 0)
                                return ret;
                }
next:
                spin_lock(&fs_info->qgroup_op_lock);
                n = rb_next(&tmp_oper->n);
                spin_unlock(&fs_info->qgroup_op_lock);
                if (!n)
                        break;
                tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
        }

        /* Ok now process the qgroups we found */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                struct btrfs_qgroup_list *glist;

                qg = u64_to_ptr(unode->aux);
                if (qg->old_refcnt < seq)
                        qg->old_refcnt = seq + 1;
                else
                        qg->old_refcnt++;
                if (qg->new_refcnt < seq)
                        qg->new_refcnt = seq + 1;
                else
                        qg->new_refcnt++;
                list_for_each_entry(glist, &qg->groups, next_group) {
                        ret = ulist_add(qgroups, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                return ret;
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
/* Add refcnt for the newly added reference. */
static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
                                  struct btrfs_qgroup_operation *oper,
                                  struct btrfs_qgroup *qgroup,
                                  struct ulist *tmp, struct ulist *qgroups,
                                  u64 seq)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup *qg;
        int ret;

        ulist_reinit(tmp);
        ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
                        GFP_ATOMIC);
        if (ret < 0)
                return ret;
        ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
                        GFP_ATOMIC);
        if (ret < 0)
                return ret;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                struct btrfs_qgroup_list *glist;

                qg = u64_to_ptr(unode->aux);
                if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
                        if (qg->new_refcnt < seq)
                                qg->new_refcnt = seq + 1;
                        else
                                qg->new_refcnt++;
                } else {
                        if (qg->old_refcnt < seq)
                                qg->old_refcnt = seq + 1;
                        else
                                qg->old_refcnt++;
                }
                list_for_each_entry(glist, &qg->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                return ret;
                        ret = ulist_add(qgroups, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
/*
 * This adjusts the counters for all referenced qgroups if need be.
 */
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
                                  u64 root_to_skip, u64 num_bytes,
                                  struct ulist *qgroups, u64 seq,
                                  int old_roots, int new_roots, int rescan)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup *qg;
        u64 cur_new_count, cur_old_count;

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(qgroups, &uiter))) {
                bool dirty = false;

                qg = u64_to_ptr(unode->aux);
                /*
                 * Wasn't referenced before but is now, add to the reference
                 * counters.
                 */
                if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
                        qg->rfer += num_bytes;
                        qg->rfer_cmpr += num_bytes;
                        dirty = true;
                }

                /*
                 * Was referenced before but isn't now, subtract from the
                 * reference counters.
                 */
                if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
                        qg->rfer -= num_bytes;
                        qg->rfer_cmpr -= num_bytes;
                        dirty = true;
                }

                if (qg->old_refcnt < seq)
                        cur_old_count = 0;
                else
                        cur_old_count = qg->old_refcnt - seq;
                if (qg->new_refcnt < seq)
                        cur_new_count = 0;
                else
                        cur_new_count = qg->new_refcnt - seq;

                /*
                 * If our refcount was the same as the roots previously but our
                 * new count isn't the same as the number of roots now then we
                 * went from having an exclusive reference on this range to not.
                 */
                if (old_roots && cur_old_count == old_roots &&
                    (cur_new_count != new_roots || new_roots == 0)) {
                        WARN_ON(cur_new_count != new_roots && new_roots == 0);
                        qg->excl -= num_bytes;
                        qg->excl_cmpr -= num_bytes;
                        dirty = true;
                }

                /*
                 * If we didn't reference all the roots before but now we do we
                 * have an exclusive reference to this range.
                 */
                if ((!old_roots || (old_roots && cur_old_count != old_roots))
                    && cur_new_count == new_roots) {
                        qg->excl += num_bytes;
                        qg->excl_cmpr += num_bytes;
                        dirty = true;
                }

                if (dirty)
                        qgroup_dirty(fs_info, qg);
        }
        return 0;
}
/*
 * If we removed a data extent and there were other references for that bytenr
 * then we need to lookup all referenced roots to make sure we still don't
 * reference this bytenr.  If we do then we can just discard this operation.
 */
static int check_existing_refs(struct btrfs_trans_handle *trans,
                               struct btrfs_fs_info *fs_info,
                               struct btrfs_qgroup_operation *oper)
{
        struct ulist *roots = NULL;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int ret = 0;

        ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
                                   oper->elem.seq, &roots);
        if (ret < 0)
                return ret;
        ret = 0;

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(roots, &uiter))) {
                if (unode->val == oper->ref_root) {
                        ret = 1;
                        break;
                }
        }
        ulist_free(roots);
        btrfs_put_tree_mod_seq(fs_info, &oper->elem);

        return ret;
}
/*
 * If we share a reference across multiple roots then we may need to adjust
 * various qgroups referenced and exclusive counters.  The basic premise is
 * this:
 *
 * 1) We have seq to represent a 0 count.  Instead of looping through all of
 * the qgroups and resetting their refcount to 0 we just constantly bump this
 * sequence number to act as the base reference count.  This means that if
 * anybody is equal to or below this sequence they were never referenced.  We
 * jack this sequence up by the number of roots we found each time in order to
 * make sure we don't have any overlap.
 *
 * 2) We first search all the roots that reference the area _except_ the root
 * we're acting on currently.  This makes up the old_refcnt of all the qgroups
 * that reference the area.
 *
 * 3) We walk all of the qgroups referenced by the root we are currently acting
 * on, and will either adjust old_refcnt in the case of a removal or the
 * new_refcnt in the case of an addition.
 *
 * 4) Finally we walk all the qgroups that are referenced by this range
 * including the root we are acting on currently.  We will adjust the counters
 * based on the number of roots we had and will have after this operation.
 *
 * Take this example as an illustration
 *
 *                      [qgroup 1/0]
 *                   /       |          \
 *              [qg 0/0]  [qg 0/1]   [qg 0/2]
 *                   \       |           /
 *                  [         extent          ]
 *
 * Say we are adding a reference that is covered by qg 0/0.  The first step
 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
 * old_roots being 2.  Because it is adding new_roots will be 1.  We then go
 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
 * new_refcnt, bringing it to 3.  We then walk through all of the qgroups, we
 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
 * reference and thus must add the size to the referenced bytes.  Everything
 * else is the same so nothing else changes.
 */
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info,
                                    struct btrfs_qgroup_operation *oper)
{
        struct ulist *roots = NULL;
        struct ulist *qgroups, *tmp;
        struct btrfs_qgroup *qgroup;
        struct seq_list elem = {};
        u64 seq;
        int old_roots = 0;
        int new_roots = 0;
        int ret = 0;

        if (oper->elem.seq) {
                ret = check_existing_refs(trans, fs_info, oper);
                if (ret < 0)
                        return ret;
                if (ret)
                        return 0;
        }

        qgroups = ulist_alloc(GFP_NOFS);
        if (!qgroups)
                return -ENOMEM;

        tmp = ulist_alloc(GFP_NOFS);
        if (!tmp) {
                ulist_free(qgroups);
                return -ENOMEM;
        }

        btrfs_get_tree_mod_seq(fs_info, &elem);
        ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
                                   &roots);
        btrfs_put_tree_mod_seq(fs_info, &elem);
        if (ret < 0) {
                ulist_free(qgroups);
                ulist_free(tmp);
                return ret;
        }
        spin_lock(&fs_info->qgroup_lock);
        qgroup = find_qgroup_rb(fs_info, oper->ref_root);
        if (!qgroup)
                goto out;
        seq = fs_info->qgroup_seq;

        /*
         * So roots is the list of all the roots currently pointing at the
         * bytenr, including the ref we are adding if we are adding, or not if
         * we are removing a ref.  So we pass in the ref_root to skip that root
         * in our calculations.  We set old_refcnt and new_refcnt because who
         * knows what everything looked like before, and it doesn't matter.
         */
        ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
                                     seq, &old_roots, 0);
        if (ret < 0)
                goto out;

        /*
         * Now adjust the refcounts of the qgroups that care about this
         * reference, either the old_count in the case of removal or new_count
         * in the case of an addition.
         */
        ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
                                     seq);
        if (ret < 0)
                goto out;

        /*
         * ...in the case of removals.  If we had a removal before we got around
         * to processing this operation then we need to find that guy and count
         * his references as if they really existed so we don't end up screwing
         * up the exclusive counts.  Then whenever we go to process the delete
         * everything will be grand and we can account for whatever exclusive
         * changes need to be made there.  We also have to pass in old_roots so
         * we have an accurate count of the roots as it pertains to this
         * operations view of the world.
         */
        ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
                                          &old_roots);
        if (ret < 0)
                goto out;

        /*
         * We are adding our root, need to adjust up the number of roots,
         * otherwise old_roots is the number of roots we want.
         */
        if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
                new_roots = old_roots + 1;
        } else {
                new_roots = old_roots;
                old_roots++;
        }
        fs_info->qgroup_seq += old_roots + 1;

        /*
         * And now the magic happens, bless Arne for having a pretty elegant
         * solution for this.
         */
        qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
                               qgroups, seq, old_roots, new_roots, 0);
out:
        spin_unlock(&fs_info->qgroup_lock);
        ulist_free(qgroups);
        ulist_free(roots);
        ulist_free(tmp);
        return ret;
}
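
/*
 * Illustrative numeric walk-through, not part of the original file,
 * continuing the example in the big comment above: with seq = 4 and two old
 * roots, qgroup_calc_old_refcnt() leaves qg 0/1 and 0/2 at old_refcnt 5 and
 * qg 1/0 at 6; qgroup_calc_new_refcnt() then sets new_refcnt of qg 0/0 to 5
 * and bumps qg 1/0 to 7.  qgroup_adjust_counters() sees old <= seq < new for
 * qg 0/0 and adds num_bytes to its rfer, and fs_info->qgroup_seq advances by
 * old_roots + 1 so the next operation starts from a clean base.
 */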
/*
 * Process a reference to a shared subtree. This type of operation is
 * queued during snapshot removal when we encounter extents which are
 * shared between more than one root.
 */
static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_qgroup_operation *oper)
{
        struct ulist *roots = NULL;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup_list *glist;
        struct ulist *parents;
        int ret = 0;
        int err;
        struct btrfs_qgroup *qg;
        u64 root_obj = 0;
        struct seq_list elem = {};

        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
                return -ENOMEM;

        btrfs_get_tree_mod_seq(fs_info, &elem);
        ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
                                   elem.seq, &roots);
        btrfs_put_tree_mod_seq(fs_info, &elem);
        if (ret < 0)
                goto out;

        if (roots->nnodes != 1)
                goto out;

        ULIST_ITER_INIT(&uiter);
        unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
        /*
         * If we find our ref root then that means all refs
         * this extent has to the root have not yet been
         * deleted. In that case, we do nothing and let the
         * last ref for this bytenr drive our update.
         *
         * This can happen for example if an extent is
         * referenced multiple times in a snapshot (clone,
         * etc). If we are in the middle of snapshot removal,
         * queued updates for such an extent will find the
         * root if we have not yet finished removing the
         * snapshot.
         */
        if (unode->val == oper->ref_root)
                goto out;

        root_obj = unode->val;
        BUG_ON(!root_obj);

        spin_lock(&fs_info->qgroup_lock);
        qg = find_qgroup_rb(fs_info, root_obj);
        if (!qg)
                goto out_unlock;

        qg->excl += oper->num_bytes;
        qg->excl_cmpr += oper->num_bytes;
        qgroup_dirty(fs_info, qg);

        /*
         * Adjust counts for parent groups. First we find all
         * parents, then in the 2nd loop we do the adjustment
         * while adding parents of the parents to our ulist.
         */
        list_for_each_entry(glist, &qg->groups, next_group) {
                err = ulist_add(parents, glist->group->qgroupid,
                                ptr_to_u64(glist->group), GFP_ATOMIC);
                if (err < 0) {
                        ret = err;
                        goto out_unlock;
                }
        }

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(parents, &uiter))) {
                qg = u64_to_ptr(unode->aux);
                qg->excl += oper->num_bytes;
                qg->excl_cmpr += oper->num_bytes;
                qgroup_dirty(fs_info, qg);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qg->groups, next_group) {
                        err = ulist_add(parents, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (err < 0) {
                                ret = err;
                                goto out_unlock;
                        }
                }
        }

out_unlock:
        spin_unlock(&fs_info->qgroup_lock);

out:
        ulist_free(roots);
        ulist_free(parents);
        return ret;
}
/*
 * btrfs_qgroup_account is called for every ref that is added to or deleted
 * from the fs.  First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots.  The
 * accounting algorithm works in 3 steps documented inline.
 */
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info,
                                struct btrfs_qgroup_operation *oper)
{
        int ret = 0;

        if (!fs_info->quota_enabled)
                return 0;

        BUG_ON(!fs_info->quota_root);

        mutex_lock(&fs_info->qgroup_rescan_lock);
        if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
                if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
                        mutex_unlock(&fs_info->qgroup_rescan_lock);
                        return 0;
                }
        }
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        ASSERT(is_fstree(oper->ref_root));

        switch (oper->type) {
        case BTRFS_QGROUP_OPER_ADD_EXCL:
        case BTRFS_QGROUP_OPER_SUB_EXCL:
                ret = qgroup_excl_accounting(fs_info, oper);
                break;
        case BTRFS_QGROUP_OPER_ADD_SHARED:
        case BTRFS_QGROUP_OPER_SUB_SHARED:
                ret = qgroup_shared_accounting(trans, fs_info, oper);
                break;
        case BTRFS_QGROUP_OPER_SUB_SUBTREE:
                ret = qgroup_subtree_accounting(trans, fs_info, oper);
                break;
        default:
                ASSERT(0);
        }
        return ret;
}
/*
 * Needs to be called every time we run delayed refs, even if there is an
 * error, in order to clean up outstanding operations.
 */
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info)
{
        struct btrfs_qgroup_operation *oper;
        int ret = 0;

        while (!list_empty(&trans->qgroup_ref_list)) {
                oper = list_first_entry(&trans->qgroup_ref_list,
                                        struct btrfs_qgroup_operation, list);
                list_del_init(&oper->list);
                if (!ret || !trans->aborted)
                        ret = btrfs_qgroup_account(trans, fs_info, oper);
                spin_lock(&fs_info->qgroup_op_lock);
                rb_erase(&oper->n, &fs_info->qgroup_op_tree);
                spin_unlock(&fs_info->qgroup_op_lock);
                btrfs_put_tree_mod_seq(fs_info, &oper->elem);
                kfree(oper);
        }
        return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
                      struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root = fs_info->quota_root;
        int ret = 0;
        int start_rescan_worker = 0;

        if (!quota_root)
                goto out;

        if (!fs_info->quota_enabled && fs_info->pending_quota_state)
                start_rescan_worker = 1;

        fs_info->quota_enabled = fs_info->pending_quota_state;

        spin_lock(&fs_info->qgroup_lock);
        while (!list_empty(&fs_info->dirty_qgroups)) {
                struct btrfs_qgroup *qgroup;
                qgroup = list_first_entry(&fs_info->dirty_qgroups,
                                          struct btrfs_qgroup, dirty);
                list_del_init(&qgroup->dirty);
                spin_unlock(&fs_info->qgroup_lock);
                ret = update_qgroup_info_item(trans, quota_root, qgroup);
                if (ret)
                        fs_info->qgroup_flags |=
                                        BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                spin_lock(&fs_info->qgroup_lock);
        }
        if (fs_info->quota_enabled)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
        else
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_status_item(trans, fs_info, quota_root);
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

        if (!ret && start_rescan_worker) {
                ret = qgroup_rescan_init(fs_info, 0, 1);
                if (!ret) {
                        qgroup_rescan_zero_tracking(fs_info);
                        btrfs_queue_work(fs_info->qgroup_rescan_workers,
                                         &fs_info->qgroup_rescan_work);
                }
                ret = 0;
        }

out:
        return ret;
}
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
                         struct btrfs_qgroup_inherit *inherit)
{
        int ret = 0;
        int i;
        u64 *i_qgroups;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_qgroup *srcgroup;
        struct btrfs_qgroup *dstgroup;
        u32 level_size = 0;
        u64 nums;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_enabled)
                goto out;

        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
                nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
                       2 * inherit->num_excl_copies;
                for (i = 0; i < nums; ++i) {
                        srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
                        if (!srcgroup) {
                                ret = -EINVAL;
                                goto out;
                        }
                        ++i_qgroups;
                }
        }

        /*
         * create a tracking group for the subvol itself
         */
        ret = add_qgroup_item(trans, quota_root, objectid);
        if (ret)
                goto out;

        if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
                ret = update_qgroup_limit_item(trans, quota_root, objectid,
                                               inherit->lim.flags,
                                               inherit->lim.max_rfer,
                                               inherit->lim.max_excl,
                                               inherit->lim.rsv_rfer,
                                               inherit->lim.rsv_excl);
                if (ret)
                        goto out;
        }

        if (srcid) {
                struct btrfs_root *srcroot;
                struct btrfs_key srckey;
                int srcroot_level;

                srckey.objectid = srcid;
                srckey.type = BTRFS_ROOT_ITEM_KEY;
                srckey.offset = (u64)-1;
                srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
                if (IS_ERR(srcroot)) {
                        ret = PTR_ERR(srcroot);
                        goto out;
                }

                rcu_read_lock();
                srcroot_level = btrfs_header_level(srcroot->node);
                level_size = btrfs_level_size(srcroot, srcroot_level);
                rcu_read_unlock();
        }

        /*
         * add qgroup to all inherited groups
         */
        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
                for (i = 0; i < inherit->num_qgroups; ++i) {
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       objectid, *i_qgroups);
                        if (ret)
                                goto out;
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       *i_qgroups, objectid);
                        if (ret)
                                goto out;
                        ++i_qgroups;
                }
        }

        spin_lock(&fs_info->qgroup_lock);

        dstgroup = add_qgroup_rb(fs_info, objectid);
        if (IS_ERR(dstgroup)) {
                ret = PTR_ERR(dstgroup);
                goto unlock;
        }

        if (srcid) {
                srcgroup = find_qgroup_rb(fs_info, srcid);
                if (!srcgroup)
                        goto unlock;

                /*
                 * We call inherit after we clone the root in order to make sure
                 * our counts don't go crazy, so at this point the only
                 * difference between the two roots should be the root node.
                 */
                dstgroup->rfer = srcgroup->rfer;
                dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
                dstgroup->excl = level_size;
                dstgroup->excl_cmpr = level_size;
                srcgroup->excl = level_size;
                srcgroup->excl_cmpr = level_size;
                qgroup_dirty(fs_info, dstgroup);
                qgroup_dirty(fs_info, srcgroup);
        }

        if (!inherit)
                goto unlock;

        i_qgroups = (u64 *)(inherit + 1);
        for (i = 0; i < inherit->num_qgroups; ++i) {
                ret = add_relation_rb(quota_root->fs_info, objectid,
                                      *i_qgroups);
                if (ret)
                        goto unlock;
                ++i_qgroups;
        }

        for (i = 0; i < inherit->num_ref_copies; ++i) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;

                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);

                if (!src || !dst) {
                        ret = -EINVAL;
                        goto unlock;
                }

                dst->rfer = src->rfer - level_size;
                dst->rfer_cmpr = src->rfer_cmpr - level_size;
                i_qgroups += 2;
        }
        for (i = 0; i < inherit->num_excl_copies; ++i) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;

                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);

                if (!src || !dst) {
                        ret = -EINVAL;
                        goto unlock;
                }

                dst->excl = src->excl + level_size;
                dst->excl_cmpr = src->excl_cmpr + level_size;
                i_qgroups += 2;
        }

unlock:
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
/*
 * Reserve some space for a qgroup and all its parents.  The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting.  If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 ref_root = root->root_key.objectid;
        int ret = 0;
        struct ulist_node *unode;
        struct ulist_iterator uiter;

        if (!is_fstree(ref_root))
                return 0;

        if (num_bytes == 0)
                return 0;

        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root)
                goto out;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        /*
         * in a first step, we check for all affected qgroups whether any
         * limits would be exceeded
         */
        ulist_reinit(fs_info->qgroup_ulist);
        ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
                        (uintptr_t)qgroup, GFP_ATOMIC);
        if (ret < 0)
                goto out;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
                struct btrfs_qgroup *qg;
                struct btrfs_qgroup_list *glist;

                qg = u64_to_ptr(unode->aux);

                if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
                    qg->reserved + (s64)qg->rfer + num_bytes >
                    qg->max_rfer) {
                        ret = -EDQUOT;
                        goto out;
                }

                if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
                    qg->reserved + (s64)qg->excl + num_bytes >
                    qg->max_excl) {
                        ret = -EDQUOT;
                        goto out;
                }

                list_for_each_entry(glist, &qg->groups, next_group) {
                        ret = ulist_add(fs_info->qgroup_ulist,
                                        glist->group->qgroupid,
                                        (uintptr_t)glist->group, GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
        /*
         * no limits exceeded, now record the reservation into all qgroups
         */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
                struct btrfs_qgroup *qg;

                qg = u64_to_ptr(unode->aux);

                qg->reserved += num_bytes;
        }

out:
        spin_unlock(&fs_info->qgroup_lock);
        return ret;
}
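
/*
 * Illustrative usage sketch, not part of the original file: callers pair the
 * reserve above with the free below around an allocation attempt, e.g.
 *
 *      ret = btrfs_qgroup_reserve(root, num_bytes);
 *      if (ret)        // -EDQUOT when a limit would be exceeded
 *              return ret;
 *      ...
 *      if (alloc_failed)
 *              btrfs_qgroup_free(root, num_bytes);
 *
 * so 'reserved' is only carried by the qgroups while the space is in flight.
 */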
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
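/*
 * Usage sketch (hypothetical caller, kept out of the build with #if 0):
 * a reservation is taken before new space is consumed and must be given
 * back with btrfs_qgroup_free() if the operation fails. do_cow_write() is
 * a made-up placeholder, not a function of this file.
 */
#if 0
static int example_qgroup_reserved_write(struct btrfs_root *root, u64 bytes)
{
	int ret;

	ret = btrfs_qgroup_reserve(root, bytes);	/* may fail with -EDQUOT */
	if (ret)
		return ret;

	ret = do_cow_write(root, bytes);		/* hypothetical helper */
	if (ret)
		btrfs_qgroup_free(root, bytes);		/* undo on failure */
	return ret;
}
#endif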
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
}
/*
 * Returns < 0 on error, 0 when more leaves are to be scanned.
 * Returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = {};
	u64 num_bytes;
	u64 seq;
	int new_roots;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->leafsize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		new_roots = 0;
		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
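/*
 * Condensed sketch of the driving loop implied by the contract above; the
 * real driver is btrfs_qgroup_rescan_worker below, which additionally
 * starts and commits a transaction per iteration (kept out of the build
 * with #if 0).
 */
#if 0
	err = 0;
	while (!err)				/* 0: more leaves to scan */
		err = qgroup_rescan_leaf(fs_info, path, trans,
					 qgroups, tmp, scratch_leaf);
	/* err < 0: error; err == 1: done; err == 2: done, flag cleared */
#endif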
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}
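/*
 * Usage sketch (hypothetical, roughly what a synchronous rescan request
 * does, kept out of the build with #if 0): start a rescan and block until
 * the worker signals qgroup_rescan_completion.
 */
#if 0
static int example_rescan_and_wait(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_qgroup_rescan(fs_info);	/* queues the rescan worker */
	if (ret)
		return ret;

	/* returns -ERESTARTSYS if interrupted by a signal */
	return btrfs_qgroup_wait_for_completion(fs_info);
}
#endif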
/*
 * This is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}