// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ialloc.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
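
/*
 * Each pass of the walk below looks up to XFS_DQ_LOOKUP_BATCH dquots
 * starting at next_index, which is advanced past the id of every dquot
 * processed; the walk is over once the gang lookup comes back empty.
 * A callback returning -EAGAIN marks the dquot as skipped so the whole
 * walk can be retried after a short delay.
 */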
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}
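
/*
 * Both the unmount path below and the quotacheck failure path call
 * xfs_qm_dqpurge_all() with XFS_QMOPT_QUOTALL so that every cached dquot
 * of every type is torn down before the quotainfo structure goes away.
 */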
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t		*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}
STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
STATIC bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t		*ip,
	bool			doalloc)
{
	xfs_mount_t		*mp = ip->i_mount;
	int			error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * ddelete.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};
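
/*
 * xfs_qm_dquot_isolate() fills these two lists: "buffers" collects the
 * backing buffers of dirty dquots for a delayed-write flush, while
 * "dispose" collects clean, unreferenced dquots that the shrinker scan
 * then frees one by one.
 */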
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
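
/*
 * The shrinker half of reclaim: count_objects reports the LRU population
 * and scan_objects walks it with xfs_qm_dquot_isolate() above, submitting
 * whatever dirty buffers were queued and then freeing the dquots that
 * were isolated onto the dispose list.
 */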
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
	    (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}
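
/*
 * Both shrinker callbacks are wired up in xfs_qm_init_quotainfo() below
 * through qi_shrinker.count_objects and qi_shrinker.scan_objects.
 */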
STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have been already set, let's just set the
	 * default limits for this quota type
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}
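
/*
 * Like xfs_qm_init_timelimits() below, this reads the id-0 dquot of the
 * given type; dquot id 0 is where the administrator's defaults live on
 * disk (see the comment in xfs_qm_reset_dqcounts() about never resetting
 * dquot id 0).
 */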
/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
	defq->blk.warn = XFS_QM_BWARNLIMIT;
	defq->ino.warn = XFS_QM_IWARNLIMIT;
	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before he or she can not perform any
	 * more writing. If it is zero, a default is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;
	if (dqp->q_blk.warnings)
		defq->blk.warn = dqp->q_blk.warnings;
	if (dqp->q_ino.warnings)
		defq->ino.warn = dqp->q_ino.warnings;
	if (dqp->q_rtb.warnings)
		defq->rtb.warn = dqp->q_rtb.warnings;

	xfs_qm_dqdestroy(dqp);
}
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
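
/*
 * Everything set up above is undone either by the error paths just before
 * this point or by xfs_qm_destroy_quotainfo() below: the shrinker, the
 * LRU, the quota inodes and both mutexes.
 */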
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}
STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_sb_version_hasbigtime(&mp->m_sb))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
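
/*
 * After the on-disk counters are zeroed here, xfs_qm_dqusage_adjust()
 * below walks every inode in the filesystem and rebuilds them, which is
 * why the buffers can be rewritten in place without a transaction.
 */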
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
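
/*
 * The readahead issued above for extent i+1 overlaps the synchronous
 * reads that xfs_qm_reset_dqcounts_all() performs on extent i, hiding
 * most of the I/O latency of the scan.
 */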
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dqout as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
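
/*
 * Note that both the counter and the incore reservation were bumped
 * above: quotacheck runs before the filesystem goes active, so usage
 * and reservation start out identical.
 */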
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error)
		goto error_return;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}
STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
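
/*
 * The dquots handed back in *O_udqpp, *O_gdqpp and *O_pdqpp above are
 * referenced and unlocked; callers on the create and chown/chproj paths
 * drop those references with xfs_qm_dqrele() once their transaction is
 * done with them.
 */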
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot. We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation. Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
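
/*
 * The old dquot is returned to the caller, which must release it once
 * the owning transaction commits; *IO_olddq now carries an extra
 * reference to the new dquot on behalf of the inode.
 */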
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}