fs/xfs/xfs_qm.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 #include "xfs_ag.h"
27 #include "xfs_ialloc.h"
28
29 /*
30 * The global quota manager. There is only one of these for the entire
31 * system, _not_ one per file system. XQM keeps track of the overall
32 * quota functionality, including maintaining the freelist and hash
33 * tables of dquots.
34 */
35 STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
36 STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
37
38 STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
39 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
40 /*
41 * We use the batch lookup interface to iterate over the dquots as it
42 * currently is the only interface into the radix tree code that allows
43 * fuzzy lookups instead of exact matches. Holding the lock over multiple
44 * operations is fine as all callers run only during mount/umount
45 * or quotaoff.
46 */
47 #define XFS_DQ_LOOKUP_BATCH 32
48
49 STATIC int
50 xfs_qm_dquot_walk(
51 struct xfs_mount *mp,
52 xfs_dqtype_t type,
53 int (*execute)(struct xfs_dquot *dqp, void *data),
54 void *data)
55 {
56 struct xfs_quotainfo *qi = mp->m_quotainfo;
57 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
58 uint32_t next_index;
59 int last_error = 0;
60 int skipped;
61 int nr_found;
62
63 restart:
64 skipped = 0;
65 next_index = 0;
66 nr_found = 0;
67
68 while (1) {
69 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
70 int error = 0;
71 int i;
72
73 mutex_lock(&qi->qi_tree_lock);
74 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
75 next_index, XFS_DQ_LOOKUP_BATCH);
76 if (!nr_found) {
77 mutex_unlock(&qi->qi_tree_lock);
78 break;
79 }
80
81 for (i = 0; i < nr_found; i++) {
82 struct xfs_dquot *dqp = batch[i];
83
84 next_index = dqp->q_id + 1;
85
86 error = execute(batch[i], data);
87 if (error == -EAGAIN) {
88 skipped++;
89 continue;
90 }
91 if (error && last_error != -EFSCORRUPTED)
92 last_error = error;
93 }
94
95 mutex_unlock(&qi->qi_tree_lock);
96
97 /* bail out if the filesystem is corrupted. */
98 if (last_error == -EFSCORRUPTED) {
99 skipped = 0;
100 break;
101 }
102 /* we're done if id overflows back to zero */
103 if (!next_index)
104 break;
105 }
106
107 if (skipped) {
108 delay(1);
109 goto restart;
110 }
111
112 return last_error;
113 }
114
115
116 /*
117 * Purge a dquot from all tracking data structures and free it.
118 */
119 STATIC int
120 xfs_qm_dqpurge(
121 struct xfs_dquot *dqp,
122 void *data)
123 {
124 struct xfs_mount *mp = dqp->q_mount;
125 struct xfs_quotainfo *qi = mp->m_quotainfo;
126 int error = -EAGAIN;
127
128 xfs_dqlock(dqp);
129 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
130 goto out_unlock;
131
132 dqp->q_flags |= XFS_DQFLAG_FREEING;
133
134 xfs_dqflock(dqp);
135
136 /*
137 * If we are turning this type of quota off, we don't care
138 * about the dirty metadata sitting in this dquot. OTOH, if
139 * we're unmounting, we do care, so we flush it and wait.
140 */
141 if (XFS_DQ_IS_DIRTY(dqp)) {
142 struct xfs_buf *bp = NULL;
143
144 /*
145 * We don't care about getting disk errors here. We need
146 * to purge this dquot anyway, so we go ahead regardless.
147 */
148 error = xfs_qm_dqflush(dqp, &bp);
149 if (!error) {
150 error = xfs_bwrite(bp);
151 xfs_buf_relse(bp);
152 } else if (error == -EAGAIN) {
153 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
154 goto out_unlock;
155 }
156 xfs_dqflock(dqp);
157 }
158
159 ASSERT(atomic_read(&dqp->q_pincount) == 0);
160 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
161 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
162
163 xfs_dqfunlock(dqp);
164 xfs_dqunlock(dqp);
165
166 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
167 qi->qi_dquots--;
168
169 /*
170 * We move a dquot to the freelist as soon as its reference count hits
171 * zero, so this dquot really should be on the freelist here.
172 */
173 ASSERT(!list_empty(&dqp->q_lru));
174 list_lru_del(&qi->qi_lru, &dqp->q_lru);
175 XFS_STATS_DEC(mp, xs_qm_dquot_unused);
176
177 xfs_qm_dqdestroy(dqp);
178 return 0;
179
180 out_unlock:
181 xfs_dqunlock(dqp);
182 return error;
183 }
184
185 /*
186 * Purge the dquot cache.
187 */
188 void
189 xfs_qm_dqpurge_all(
190 struct xfs_mount *mp,
191 uint flags)
192 {
193 if (flags & XFS_QMOPT_UQUOTA)
194 xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
195 if (flags & XFS_QMOPT_GQUOTA)
196 xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
197 if (flags & XFS_QMOPT_PQUOTA)
198 xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
199 }
200
201 /*
202 * Purge all cached dquots and destroy the quotainfo structure.
203 */
204 void
205 xfs_qm_unmount(
206 struct xfs_mount *mp)
207 {
208 if (mp->m_quotainfo) {
209 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
210 xfs_qm_destroy_quotainfo(mp);
211 }
212 }
213
214 /*
215 * Called from the vfsops layer.
216 */
217 void
218 xfs_qm_unmount_quotas(
219 xfs_mount_t *mp)
220 {
221 /*
222 * Release the dquots that root inode, et al might be holding,
223 * before we flush quotas and blow away the quotainfo structure.
224 */
225 ASSERT(mp->m_rootip);
226 xfs_qm_dqdetach(mp->m_rootip);
227 if (mp->m_rbmip)
228 xfs_qm_dqdetach(mp->m_rbmip);
229 if (mp->m_rsumip)
230 xfs_qm_dqdetach(mp->m_rsumip);
231
232 /*
233 * Release the quota inodes.
234 */
235 if (mp->m_quotainfo) {
236 if (mp->m_quotainfo->qi_uquotaip) {
237 xfs_irele(mp->m_quotainfo->qi_uquotaip);
238 mp->m_quotainfo->qi_uquotaip = NULL;
239 }
240 if (mp->m_quotainfo->qi_gquotaip) {
241 xfs_irele(mp->m_quotainfo->qi_gquotaip);
242 mp->m_quotainfo->qi_gquotaip = NULL;
243 }
244 if (mp->m_quotainfo->qi_pquotaip) {
245 xfs_irele(mp->m_quotainfo->qi_pquotaip);
246 mp->m_quotainfo->qi_pquotaip = NULL;
247 }
248 }
249 }
250
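/*
 * Attach one dquot of the given type to the inode: if the inode already
 * holds a reference in *IO_idqpp we are done, otherwise look the dquot up
 * (allocating it if @doalloc is set) and store the reference in the inode.
 */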
251 STATIC int
252 xfs_qm_dqattach_one(
253 struct xfs_inode *ip,
254 xfs_dqtype_t type,
255 bool doalloc,
256 struct xfs_dquot **IO_idqpp)
257 {
258 struct xfs_dquot *dqp;
259 int error;
260
261 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
262 error = 0;
263
264 /*
265 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
266 * or &i_gdquot. This made the code look weird, but made the logic a lot
267 * simpler.
268 */
269 dqp = *IO_idqpp;
270 if (dqp) {
271 trace_xfs_dqattach_found(dqp);
272 return 0;
273 }
274
275 /*
276 * Find the dquot from somewhere. This bumps the reference count of the
277 * dquot and returns it locked. This can return ENOENT if the dquot didn't
278 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
279 * turned off suddenly.
280 */
281 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
282 if (error)
283 return error;
284
285 trace_xfs_dqattach_get(dqp);
286
287 /*
288 * dqget may have dropped and re-acquired the ilock, but it guarantees
289 * that the dquot returned is the one that should go in the inode.
290 */
291 *IO_idqpp = dqp;
292 xfs_dqunlock(dqp);
293 return 0;
294 }
295
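/*
 * Decide whether this inode needs dquots attached: quotas must be running
 * and on, the inode must not already have all of its dquots attached, and
 * the quota inodes themselves are never given dquots.
 */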
296 static bool
297 xfs_qm_need_dqattach(
298 struct xfs_inode *ip)
299 {
300 struct xfs_mount *mp = ip->i_mount;
301
302 if (!XFS_IS_QUOTA_RUNNING(mp))
303 return false;
304 if (!XFS_IS_QUOTA_ON(mp))
305 return false;
306 if (!XFS_NOT_DQATTACHED(mp, ip))
307 return false;
308 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
309 return false;
310 return true;
311 }
312
313 /*
314 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
315 * into account.
316 * If @doalloc is true, the dquot(s) will be allocated if needed.
317 * Inode may get unlocked and relocked in here, and the caller must deal with
318 * the consequences.
319 */
320 int
321 xfs_qm_dqattach_locked(
322 xfs_inode_t *ip,
323 bool doalloc)
324 {
325 xfs_mount_t *mp = ip->i_mount;
326 int error = 0;
327
328 if (!xfs_qm_need_dqattach(ip))
329 return 0;
330
331 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
332
333 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
334 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
335 doalloc, &ip->i_udquot);
336 if (error)
337 goto done;
338 ASSERT(ip->i_udquot);
339 }
340
341 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
342 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
343 doalloc, &ip->i_gdquot);
344 if (error)
345 goto done;
346 ASSERT(ip->i_gdquot);
347 }
348
349 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
350 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
351 doalloc, &ip->i_pdquot);
352 if (error)
353 goto done;
354 ASSERT(ip->i_pdquot);
355 }
356
357 done:
358 /*
359 * Don't worry about the dquots that we may have attached before any
360 * error - they'll get detached later if it has not already been done.
361 */
362 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
363 return error;
364 }
365
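/*
 * Lock the inode and attach its dquots, without allocating new dquots.
 */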
366 int
367 xfs_qm_dqattach(
368 struct xfs_inode *ip)
369 {
370 int error;
371
372 if (!xfs_qm_need_dqattach(ip))
373 return 0;
374
375 xfs_ilock(ip, XFS_ILOCK_EXCL);
376 error = xfs_qm_dqattach_locked(ip, false);
377 xfs_iunlock(ip, XFS_ILOCK_EXCL);
378
379 return error;
380 }
381
382 /*
383 * Release dquots (and their references) if any.
384 * The inode should be locked EXCL except when this is called by
385 * xfs_ireclaim.
386 */
387 void
388 xfs_qm_dqdetach(
389 xfs_inode_t *ip)
390 {
391 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
392 return;
393
394 trace_xfs_dquot_dqdetach(ip);
395
396 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
397 if (ip->i_udquot) {
398 xfs_qm_dqrele(ip->i_udquot);
399 ip->i_udquot = NULL;
400 }
401 if (ip->i_gdquot) {
402 xfs_qm_dqrele(ip->i_gdquot);
403 ip->i_gdquot = NULL;
404 }
405 if (ip->i_pdquot) {
406 xfs_qm_dqrele(ip->i_pdquot);
407 ip->i_pdquot = NULL;
408 }
409 }
410
411 struct xfs_qm_isolate {
412 struct list_head buffers;
413 struct list_head dispose;
414 };
415
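/*
 * list_lru isolation callback for the dquot shrinker. Dquots that regained
 * a reference are simply dropped from the LRU; dirty dquots are flushed and
 * their backing buffers queued for delwri submission; clean, unreferenced
 * dquots are marked XFS_DQFLAG_FREEING and moved to the dispose list so the
 * caller can free them.
 */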
416 static enum lru_status
417 xfs_qm_dquot_isolate(
418 struct list_head *item,
419 struct list_lru_one *lru,
420 spinlock_t *lru_lock,
421 void *arg)
422 __releases(lru_lock) __acquires(lru_lock)
423 {
424 struct xfs_dquot *dqp = container_of(item,
425 struct xfs_dquot, q_lru);
426 struct xfs_qm_isolate *isol = arg;
427
428 if (!xfs_dqlock_nowait(dqp))
429 goto out_miss_busy;
430
431 /*
432 * This dquot has acquired a reference in the meantime; remove it from
433 * the freelist and try again.
434 */
435 if (dqp->q_nrefs) {
436 xfs_dqunlock(dqp);
437 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
438
439 trace_xfs_dqreclaim_want(dqp);
440 list_lru_isolate(lru, &dqp->q_lru);
441 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
442 return LRU_REMOVED;
443 }
444
445 /*
446 * If the dquot is dirty, flush it. If it's already being flushed, just
447 * skip it so there is time for the IO to complete before we try to
448 * reclaim it again on the next LRU pass.
449 */
450 if (!xfs_dqflock_nowait(dqp)) {
451 xfs_dqunlock(dqp);
452 goto out_miss_busy;
453 }
454
455 if (XFS_DQ_IS_DIRTY(dqp)) {
456 struct xfs_buf *bp = NULL;
457 int error;
458
459 trace_xfs_dqreclaim_dirty(dqp);
460
461 /* we have to drop the LRU lock to flush the dquot */
462 spin_unlock(lru_lock);
463
464 error = xfs_qm_dqflush(dqp, &bp);
465 if (error)
466 goto out_unlock_dirty;
467
468 xfs_buf_delwri_queue(bp, &isol->buffers);
469 xfs_buf_relse(bp);
470 goto out_unlock_dirty;
471 }
472 xfs_dqfunlock(dqp);
473
474 /*
475 * Prevent lookups now that we are past the point of no return.
476 */
477 dqp->q_flags |= XFS_DQFLAG_FREEING;
478 xfs_dqunlock(dqp);
479
480 ASSERT(dqp->q_nrefs == 0);
481 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
482 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
483 trace_xfs_dqreclaim_done(dqp);
484 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
485 return LRU_REMOVED;
486
487 out_miss_busy:
488 trace_xfs_dqreclaim_busy(dqp);
489 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
490 return LRU_SKIP;
491
492 out_unlock_dirty:
493 trace_xfs_dqreclaim_busy(dqp);
494 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
495 xfs_dqunlock(dqp);
496 spin_lock(lru_lock);
497 return LRU_RETRY;
498 }
499
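/*
 * Shrinker scan callback: walk the dquot LRU, submit any dirty buffers that
 * the isolation callback queued, and free the dquots that were moved to the
 * dispose list.
 */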
500 static unsigned long
501 xfs_qm_shrink_scan(
502 struct shrinker *shrink,
503 struct shrink_control *sc)
504 {
505 struct xfs_quotainfo *qi = container_of(shrink,
506 struct xfs_quotainfo, qi_shrinker);
507 struct xfs_qm_isolate isol;
508 unsigned long freed;
509 int error;
510
511 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
512 return 0;
513
514 INIT_LIST_HEAD(&isol.buffers);
515 INIT_LIST_HEAD(&isol.dispose);
516
517 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
518 xfs_qm_dquot_isolate, &isol);
519
520 error = xfs_buf_delwri_submit(&isol.buffers);
521 if (error)
522 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
523
524 while (!list_empty(&isol.dispose)) {
525 struct xfs_dquot *dqp;
526
527 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
528 list_del_init(&dqp->q_lru);
529 xfs_qm_dqfree_one(dqp);
530 }
531
532 return freed;
533 }
534
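/*
 * Shrinker count callback: report the number of dquots on the LRU, i.e. the
 * number of potentially reclaimable dquots.
 */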
535 static unsigned long
536 xfs_qm_shrink_count(
537 struct shrinker *shrink,
538 struct shrink_control *sc)
539 {
540 struct xfs_quotainfo *qi = container_of(shrink,
541 struct xfs_quotainfo, qi_shrinker);
542
543 return list_lru_shrink_count(&qi->qi_lru, sc);
544 }
545
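/*
 * Copy the default block, inode and realtime block limits for this quota
 * type from the id-zero dquot, where the administrator-set defaults live.
 */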
546 STATIC void
547 xfs_qm_set_defquota(
548 struct xfs_mount *mp,
549 xfs_dqtype_t type,
550 struct xfs_quotainfo *qinf)
551 {
552 struct xfs_dquot *dqp;
553 struct xfs_def_quota *defq;
554 int error;
555
556 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
557 if (error)
558 return;
559
560 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
561
562 /*
563 * Timers and warnings have already been set, so just set the
564 * default limits for this quota type.
565 */
566 defq->blk.hard = dqp->q_blk.hardlimit;
567 defq->blk.soft = dqp->q_blk.softlimit;
568 defq->ino.hard = dqp->q_ino.hardlimit;
569 defq->ino.soft = dqp->q_ino.softlimit;
570 defq->rtb.hard = dqp->q_rtb.hardlimit;
571 defq->rtb.soft = dqp->q_rtb.softlimit;
572 xfs_qm_dqdestroy(dqp);
573 }
574
575 /* Initialize quota time limits from the root dquot. */
576 static void
577 xfs_qm_init_timelimits(
578 struct xfs_mount *mp,
579 xfs_dqtype_t type)
580 {
581 struct xfs_quotainfo *qinf = mp->m_quotainfo;
582 struct xfs_def_quota *defq;
583 struct xfs_dquot *dqp;
584 int error;
585
586 defq = xfs_get_defquota(qinf, type);
587
588 defq->blk.time = XFS_QM_BTIMELIMIT;
589 defq->ino.time = XFS_QM_ITIMELIMIT;
590 defq->rtb.time = XFS_QM_RTBTIMELIMIT;
591 defq->blk.warn = XFS_QM_BWARNLIMIT;
592 defq->ino.warn = XFS_QM_IWARNLIMIT;
593 defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
594
595 /*
596 * We try to get the limits from the superuser's limits fields.
597 * This is quite hacky, but it is standard quota practice.
598 *
599 * Since we may not have done a quotacheck by this point, just read
600 * the dquot without attaching it to any hashtables or lists.
601 */
602 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
603 if (error)
604 return;
605
606 /*
607 * The warnings and timers set the grace period given to a user or
608 * group before they can no longer write. If a value is zero, the
609 * default is used.
610 */
611 if (dqp->q_blk.timer)
612 defq->blk.time = dqp->q_blk.timer;
613 if (dqp->q_ino.timer)
614 defq->ino.time = dqp->q_ino.timer;
615 if (dqp->q_rtb.timer)
616 defq->rtb.time = dqp->q_rtb.timer;
617 if (dqp->q_blk.warnings)
618 defq->blk.warn = dqp->q_blk.warnings;
619 if (dqp->q_ino.warnings)
620 defq->ino.warn = dqp->q_ino.warnings;
621 if (dqp->q_rtb.warnings)
622 defq->rtb.warn = dqp->q_rtb.warnings;
623
624 xfs_qm_dqdestroy(dqp);
625 }
626
627 /*
628 * This initializes all the quota information that's kept in the
629 * mount structure
630 */
631 STATIC int
632 xfs_qm_init_quotainfo(
633 struct xfs_mount *mp)
634 {
635 struct xfs_quotainfo *qinf;
636 int error;
637
638 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
639
640 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
641
642 error = list_lru_init(&qinf->qi_lru);
643 if (error)
644 goto out_free_qinf;
645
646 /*
647 * See if quotainodes are set up, and if not, allocate them,
648 * and change the superblock accordingly.
649 */
650 error = xfs_qm_init_quotainos(mp);
651 if (error)
652 goto out_free_lru;
653
654 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
655 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
656 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
657 mutex_init(&qinf->qi_tree_lock);
658
659 /* mutex used to serialize quotaoffs */
660 mutex_init(&qinf->qi_quotaofflock);
661
662 /* Precalc some constants */
663 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
664 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
665 if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
666 qinf->qi_expiry_min =
667 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
668 qinf->qi_expiry_max =
669 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
670 } else {
671 qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
672 qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
673 }
674 trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
675 qinf->qi_expiry_max);
676
677 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
678
679 xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
680 xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
681 xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
682
683 if (XFS_IS_UQUOTA_RUNNING(mp))
684 xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
685 if (XFS_IS_GQUOTA_RUNNING(mp))
686 xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
687 if (XFS_IS_PQUOTA_RUNNING(mp))
688 xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
689
690 qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
691 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
692 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
693 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
694
695 error = register_shrinker(&qinf->qi_shrinker);
696 if (error)
697 goto out_free_inos;
698
699 return 0;
700
701 out_free_inos:
702 mutex_destroy(&qinf->qi_quotaofflock);
703 mutex_destroy(&qinf->qi_tree_lock);
704 xfs_qm_destroy_quotainos(qinf);
705 out_free_lru:
706 list_lru_destroy(&qinf->qi_lru);
707 out_free_qinf:
708 kmem_free(qinf);
709 mp->m_quotainfo = NULL;
710 return error;
711 }
712
713 /*
714 * Gets called when unmounting a filesystem or when all quotas get
715 * turned off.
716 * This purges the quota inodes, destroys locks and frees itself.
717 */
718 void
719 xfs_qm_destroy_quotainfo(
720 struct xfs_mount *mp)
721 {
722 struct xfs_quotainfo *qi;
723
724 qi = mp->m_quotainfo;
725 ASSERT(qi != NULL);
726
727 unregister_shrinker(&qi->qi_shrinker);
728 list_lru_destroy(&qi->qi_lru);
729 xfs_qm_destroy_quotainos(qi);
730 mutex_destroy(&qi->qi_tree_lock);
731 mutex_destroy(&qi->qi_quotaofflock);
732 kmem_free(qi);
733 mp->m_quotainfo = NULL;
734 }
735
736 /*
737 * Create an inode and return with a reference already taken, but
738 * unlocked. This is how we create quota inodes.
739 */
740 STATIC int
741 xfs_qm_qino_alloc(
742 struct xfs_mount *mp,
743 struct xfs_inode **ipp,
744 unsigned int flags)
745 {
746 struct xfs_trans *tp;
747 int error;
748 bool need_alloc = true;
749
750 *ipp = NULL;
751 /*
752 * With a superblock that doesn't have a separate pquotino, we
753 * share an inode between gquota and pquota. If the on-disk
754 * superblock has GQUOTA and the filesystem is now mounted
755 * with PQUOTA, just use sb_gquotino for sb_pquotino and
756 * vice-versa.
757 */
758 if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
759 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
760 xfs_ino_t ino = NULLFSINO;
761
762 if ((flags & XFS_QMOPT_PQUOTA) &&
763 (mp->m_sb.sb_gquotino != NULLFSINO)) {
764 ino = mp->m_sb.sb_gquotino;
765 if (XFS_IS_CORRUPT(mp,
766 mp->m_sb.sb_pquotino != NULLFSINO))
767 return -EFSCORRUPTED;
768 } else if ((flags & XFS_QMOPT_GQUOTA) &&
769 (mp->m_sb.sb_pquotino != NULLFSINO)) {
770 ino = mp->m_sb.sb_pquotino;
771 if (XFS_IS_CORRUPT(mp,
772 mp->m_sb.sb_gquotino != NULLFSINO))
773 return -EFSCORRUPTED;
774 }
775 if (ino != NULLFSINO) {
776 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
777 if (error)
778 return error;
779 mp->m_sb.sb_gquotino = NULLFSINO;
780 mp->m_sb.sb_pquotino = NULLFSINO;
781 need_alloc = false;
782 }
783 }
784
785 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
786 need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
787 0, 0, &tp);
788 if (error)
789 return error;
790
791 if (need_alloc) {
792 xfs_ino_t ino;
793
794 error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
795 if (!error)
796 error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
797 S_IFREG, 1, 0, 0, false, ipp);
798 if (error) {
799 xfs_trans_cancel(tp);
800 return error;
801 }
802 }
803
804 /*
805 * Make the changes in the superblock, and log those too.
806 * sbfields arg may contain fields other than *QUOTINO;
807 * VERSIONNUM for example.
808 */
809 spin_lock(&mp->m_sb_lock);
810 if (flags & XFS_QMOPT_SBVERSION) {
811 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
812
813 xfs_sb_version_addquota(&mp->m_sb);
814 mp->m_sb.sb_uquotino = NULLFSINO;
815 mp->m_sb.sb_gquotino = NULLFSINO;
816 mp->m_sb.sb_pquotino = NULLFSINO;
817
818 /* qflags will get updated fully _after_ quotacheck */
819 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
820 }
821 if (flags & XFS_QMOPT_UQUOTA)
822 mp->m_sb.sb_uquotino = (*ipp)->i_ino;
823 else if (flags & XFS_QMOPT_GQUOTA)
824 mp->m_sb.sb_gquotino = (*ipp)->i_ino;
825 else
826 mp->m_sb.sb_pquotino = (*ipp)->i_ino;
827 spin_unlock(&mp->m_sb_lock);
828 xfs_log_sb(tp);
829
830 error = xfs_trans_commit(tp);
831 if (error) {
832 ASSERT(XFS_FORCED_SHUTDOWN(mp));
833 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
834 }
835 if (need_alloc)
836 xfs_finish_inode_setup(*ipp);
837 return error;
838 }
839
840
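/*
 * Reset the counters (and, except for the id-zero dquot, the timers and
 * warning counts) of every dquot in this buffer so quotacheck can rebuild
 * the usage figures from scratch, repairing corrupt dquot blocks as we go.
 */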
841 STATIC void
842 xfs_qm_reset_dqcounts(
843 struct xfs_mount *mp,
844 struct xfs_buf *bp,
845 xfs_dqid_t id,
846 xfs_dqtype_t type)
847 {
848 struct xfs_dqblk *dqb;
849 int j;
850
851 trace_xfs_reset_dqcounts(bp, _RET_IP_);
852
853 /*
854 * Reset all counters and timers. They'll be
855 * started afresh by xfs_qm_quotacheck.
856 */
857 #ifdef DEBUG
858 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
859 sizeof(xfs_dqblk_t);
860 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
861 #endif
862 dqb = bp->b_addr;
863 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
864 struct xfs_disk_dquot *ddq;
865
866 ddq = (struct xfs_disk_dquot *)&dqb[j];
867
868 /*
869 * Do a sanity check, and if needed, repair the dqblk. Don't
870 * output any warnings because it's perfectly possible to
871 * find uninitialised dquot blks. See comment in
872 * xfs_dquot_verify.
873 */
874 if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
875 (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
876 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
877
878 /*
879 * Reset the type in case we are reusing the group quota file
880 * for project quotas, or vice versa.
881 */
882 ddq->d_type = type;
883 ddq->d_bcount = 0;
884 ddq->d_icount = 0;
885 ddq->d_rtbcount = 0;
886
887 /*
888 * dquot id 0 stores the default grace period and the maximum
889 * warning limit that were set by the administrator, so we
890 * should not reset them.
891 */
892 if (ddq->d_id != 0) {
893 ddq->d_btimer = 0;
894 ddq->d_itimer = 0;
895 ddq->d_rtbtimer = 0;
896 ddq->d_bwarns = 0;
897 ddq->d_iwarns = 0;
898 ddq->d_rtbwarns = 0;
899 if (xfs_sb_version_hasbigtime(&mp->m_sb))
900 ddq->d_type |= XFS_DQTYPE_BIGTIME;
901 }
902
903 if (xfs_sb_version_hascrc(&mp->m_sb)) {
904 xfs_update_cksum((char *)&dqb[j],
905 sizeof(struct xfs_dqblk),
906 XFS_DQUOT_CRC_OFF);
907 }
908 }
909 }
910
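/*
 * Read every dquot block in the given range, reset the counters in each
 * one, and queue the buffers for delayed write.
 */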
911 STATIC int
912 xfs_qm_reset_dqcounts_all(
913 struct xfs_mount *mp,
914 xfs_dqid_t firstid,
915 xfs_fsblock_t bno,
916 xfs_filblks_t blkcnt,
917 xfs_dqtype_t type,
918 struct list_head *buffer_list)
919 {
920 struct xfs_buf *bp;
921 int error = 0;
922
923 ASSERT(blkcnt > 0);
924
925 /*
926 * Blkcnt arg can be a very big number, and might even be
927 * larger than the log itself. So, we have to break it up into
928 * manageable-sized transactions.
929 * Note that we don't start a permanent transaction here; we might
930 * not be able to get a log reservation for the whole thing up front,
931 * and we don't really care to either, because we just discard
932 * everything if we were to crash in the middle of this loop.
933 */
934 while (blkcnt--) {
935 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
936 XFS_FSB_TO_DADDR(mp, bno),
937 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
938 &xfs_dquot_buf_ops);
939
940 /*
941 * CRC and validation errors will return an EFSCORRUPTED here. If
942 * this occurs, re-read without CRC validation so that we can
943 * repair the damage via xfs_qm_reset_dqcounts(). This process
944 * will leave a trace in the log indicating corruption has
945 * been detected.
946 */
947 if (error == -EFSCORRUPTED) {
948 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
949 XFS_FSB_TO_DADDR(mp, bno),
950 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
951 NULL);
952 }
953
954 if (error)
955 break;
956
957 /*
958 * A corrupt buffer might not have a verifier attached, so
959 * make sure we have the correct one attached before writeback
960 * occurs.
961 */
962 bp->b_ops = &xfs_dquot_buf_ops;
963 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
964 xfs_buf_delwri_queue(bp, buffer_list);
965 xfs_buf_relse(bp);
966
967 /* goto the next block. */
968 bno++;
969 firstid += mp->m_quotainfo->qi_dqperchunk;
970 }
971
972 return error;
973 }
974
975 /*
976 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
977 * counters for every chunk of dquots that we find.
978 */
979 STATIC int
980 xfs_qm_reset_dqcounts_buf(
981 struct xfs_mount *mp,
982 struct xfs_inode *qip,
983 xfs_dqtype_t type,
984 struct list_head *buffer_list)
985 {
986 struct xfs_bmbt_irec *map;
987 int i, nmaps; /* number of map entries */
988 int error; /* return value */
989 xfs_fileoff_t lblkno;
990 xfs_filblks_t maxlblkcnt;
991 xfs_dqid_t firstid;
992 xfs_fsblock_t rablkno;
993 xfs_filblks_t rablkcnt;
994
995 error = 0;
996 /*
997 * This looks racy, but we can't keep an inode lock across a
998 * trans_reserve. But, this gets called during quotacheck, and that
999 * happens only at mount time which is single threaded.
1000 */
1001 if (qip->i_nblocks == 0)
1002 return 0;
1003
1004 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1005
1006 lblkno = 0;
1007 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1008 do {
1009 uint lock_mode;
1010
1011 nmaps = XFS_DQITER_MAP_SIZE;
1012 /*
1013 * We aren't changing the inode itself. Just changing
1014 * some of its data. No new blocks are added here, and
1015 * the inode is never added to the transaction.
1016 */
1017 lock_mode = xfs_ilock_data_map_shared(qip);
1018 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1019 map, &nmaps, 0);
1020 xfs_iunlock(qip, lock_mode);
1021 if (error)
1022 break;
1023
1024 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1025 for (i = 0; i < nmaps; i++) {
1026 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1027 ASSERT(map[i].br_blockcount);
1028
1029
1030 lblkno += map[i].br_blockcount;
1031
1032 if (map[i].br_startblock == HOLESTARTBLOCK)
1033 continue;
1034
1035 firstid = (xfs_dqid_t) map[i].br_startoff *
1036 mp->m_quotainfo->qi_dqperchunk;
1037 /*
1038 * Do a read-ahead on the next extent.
1039 */
1040 if ((i+1 < nmaps) &&
1041 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1042 rablkcnt = map[i+1].br_blockcount;
1043 rablkno = map[i+1].br_startblock;
1044 while (rablkcnt--) {
1045 xfs_buf_readahead(mp->m_ddev_targp,
1046 XFS_FSB_TO_DADDR(mp, rablkno),
1047 mp->m_quotainfo->qi_dqchunklen,
1048 &xfs_dquot_buf_ops);
1049 rablkno++;
1050 }
1051 }
1052 /*
1053 * Iterate thru all the blks in the extent and
1054 * reset the counters of all the dquots inside them.
1055 */
1056 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1057 map[i].br_startblock,
1058 map[i].br_blockcount,
1059 type, buffer_list);
1060 if (error)
1061 goto out;
1062 }
1063 } while (nmaps > 0);
1064
1065 out:
1066 kmem_free(map);
1067 return error;
1068 }
1069
1070 /*
1071 * Called by dqusage_adjust in doing a quotacheck.
1072 *
1073 * Given the inode and a dquot id, this updates both the incore dquot as well
1074 * as the buffer copy. This is so that once the quotacheck is done, we can
1075 * just log all the buffers, as opposed to logging numerous updates to
1076 * individual dquots.
1077 */
1078 STATIC int
1079 xfs_qm_quotacheck_dqadjust(
1080 struct xfs_inode *ip,
1081 xfs_dqtype_t type,
1082 xfs_qcnt_t nblks,
1083 xfs_qcnt_t rtblks)
1084 {
1085 struct xfs_mount *mp = ip->i_mount;
1086 struct xfs_dquot *dqp;
1087 xfs_dqid_t id;
1088 int error;
1089
1090 id = xfs_qm_id_for_quotatype(ip, type);
1091 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1092 if (error) {
1093 /*
1094 * Shouldn't be able to turn off quotas here.
1095 */
1096 ASSERT(error != -ESRCH);
1097 ASSERT(error != -ENOENT);
1098 return error;
1099 }
1100
1101 trace_xfs_dqadjust(dqp);
1102
1103 /*
1104 * Adjust the inode count and the block count to reflect this inode's
1105 * resource usage.
1106 */
1107 dqp->q_ino.count++;
1108 dqp->q_ino.reserved++;
1109 if (nblks) {
1110 dqp->q_blk.count += nblks;
1111 dqp->q_blk.reserved += nblks;
1112 }
1113 if (rtblks) {
1114 dqp->q_rtb.count += rtblks;
1115 dqp->q_rtb.reserved += rtblks;
1116 }
1117
1118 /*
1119 * Set default limits, adjust timers (since we changed usages)
1120 *
1121 * There are no timers for the default values set in the root dquot.
1122 */
1123 if (dqp->q_id) {
1124 xfs_qm_adjust_dqlimits(dqp);
1125 xfs_qm_adjust_dqtimers(dqp);
1126 }
1127
1128 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1129 xfs_qm_dqput(dqp);
1130 return 0;
1131 }
1132
1133 /*
1134 * Callback routine supplied to the inode walk. Given an inumber, find its
1135 * dquots and update them to account for resources taken by that inode.
1136 */
1137 /* ARGSUSED */
1138 STATIC int
1139 xfs_qm_dqusage_adjust(
1140 struct xfs_mount *mp,
1141 struct xfs_trans *tp,
1142 xfs_ino_t ino,
1143 void *data)
1144 {
1145 struct xfs_inode *ip;
1146 xfs_qcnt_t nblks;
1147 xfs_filblks_t rtblks = 0; /* total rt blks */
1148 int error;
1149
1150 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1151
1152 /*
1153 * rootino must have its resources accounted for, not so with the quota
1154 * inodes.
1155 */
1156 if (xfs_is_quota_inode(&mp->m_sb, ino))
1157 return 0;
1158
1159 /*
1160 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1161 * at mount time and therefore nobody will be racing chown/chproj.
1162 */
1163 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1164 if (error == -EINVAL || error == -ENOENT)
1165 return 0;
1166 if (error)
1167 return error;
1168
1169 ASSERT(ip->i_delayed_blks == 0);
1170
1171 if (XFS_IS_REALTIME_INODE(ip)) {
1172 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1173
1174 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1175 if (error)
1176 goto error0;
1177
1178 xfs_bmap_count_leaves(ifp, &rtblks);
1179 }
1180
1181 nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1182
1183 /*
1184 * Add the (disk blocks and inode) resources occupied by this
1185 * inode to its dquots. We do this adjustment in the incore dquot,
1186 * and also copy the changes to its buffer.
1187 * We don't care about putting these changes in a transaction
1188 * envelope because if we crash in the middle of a 'quotacheck'
1189 * we have to start from the beginning anyway.
1190 * Once we're done, we'll log all the dquot bufs.
1191 *
1192 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1193 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1194 */
1195 if (XFS_IS_UQUOTA_ON(mp)) {
1196 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1197 rtblks);
1198 if (error)
1199 goto error0;
1200 }
1201
1202 if (XFS_IS_GQUOTA_ON(mp)) {
1203 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1204 rtblks);
1205 if (error)
1206 goto error0;
1207 }
1208
1209 if (XFS_IS_PQUOTA_ON(mp)) {
1210 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1211 rtblks);
1212 if (error)
1213 goto error0;
1214 }
1215
1216 error0:
1217 xfs_irele(ip);
1218 return error;
1219 }
1220
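/*
 * Flush a single dirty dquot to its backing buffer and queue that buffer on
 * the quotacheck delwri list.
 */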
1221 STATIC int
1222 xfs_qm_flush_one(
1223 struct xfs_dquot *dqp,
1224 void *data)
1225 {
1226 struct xfs_mount *mp = dqp->q_mount;
1227 struct list_head *buffer_list = data;
1228 struct xfs_buf *bp = NULL;
1229 int error = 0;
1230
1231 xfs_dqlock(dqp);
1232 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1233 goto out_unlock;
1234 if (!XFS_DQ_IS_DIRTY(dqp))
1235 goto out_unlock;
1236
1237 /*
1238 * The only way the dquot is already flush locked by the time quotacheck
1239 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1240 * it for the final time. Quotacheck collects all dquot bufs in the
1241 * local delwri queue before dquots are dirtied, so reclaim can't have
1242 * possibly queued it for I/O. The only way out is to push the buffer to
1243 * cycle the flush lock.
1244 */
1245 if (!xfs_dqflock_nowait(dqp)) {
1246 /* buf is pinned in-core by delwri list */
1247 bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1248 mp->m_quotainfo->qi_dqchunklen, 0);
1249 if (!bp) {
1250 error = -EINVAL;
1251 goto out_unlock;
1252 }
1253 xfs_buf_unlock(bp);
1254
1255 xfs_buf_delwri_pushbuf(bp, buffer_list);
1256 xfs_buf_rele(bp);
1257
1258 error = -EAGAIN;
1259 goto out_unlock;
1260 }
1261
1262 error = xfs_qm_dqflush(dqp, &bp);
1263 if (error)
1264 goto out_unlock;
1265
1266 xfs_buf_delwri_queue(bp, buffer_list);
1267 xfs_buf_relse(bp);
1268 out_unlock:
1269 xfs_dqunlock(dqp);
1270 return error;
1271 }
1272
1273 /*
1274 * Walk thru all the filesystem inodes and construct a consistent view
1275 * of the disk quota world. If the quotacheck fails, disable quotas.
1276 */
1277 STATIC int
1278 xfs_qm_quotacheck(
1279 xfs_mount_t *mp)
1280 {
1281 int error, error2;
1282 uint flags;
1283 LIST_HEAD (buffer_list);
1284 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1285 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1286 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1287
1288 flags = 0;
1289
1290 ASSERT(uip || gip || pip);
1291 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1292
1293 xfs_notice(mp, "Quotacheck needed: Please wait.");
1294
1295 /*
1296 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1297 * their counters to zero. We need a clean slate.
1298 * We don't log our changes till later.
1299 */
1300 if (uip) {
1301 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1302 &buffer_list);
1303 if (error)
1304 goto error_return;
1305 flags |= XFS_UQUOTA_CHKD;
1306 }
1307
1308 if (gip) {
1309 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1310 &buffer_list);
1311 if (error)
1312 goto error_return;
1313 flags |= XFS_GQUOTA_CHKD;
1314 }
1315
1316 if (pip) {
1317 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1318 &buffer_list);
1319 if (error)
1320 goto error_return;
1321 flags |= XFS_PQUOTA_CHKD;
1322 }
1323
1324 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1325 NULL);
1326 if (error)
1327 goto error_return;
1328
1329 /*
1330 * We've made all the changes that we need to make incore. Flush them
1331 * down to disk buffers if everything was updated successfully.
1332 */
1333 if (XFS_IS_UQUOTA_ON(mp)) {
1334 error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1335 &buffer_list);
1336 }
1337 if (XFS_IS_GQUOTA_ON(mp)) {
1338 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1339 &buffer_list);
1340 if (!error)
1341 error = error2;
1342 }
1343 if (XFS_IS_PQUOTA_ON(mp)) {
1344 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1345 &buffer_list);
1346 if (!error)
1347 error = error2;
1348 }
1349
1350 error2 = xfs_buf_delwri_submit(&buffer_list);
1351 if (!error)
1352 error = error2;
1353
1354 /*
1355 * We can get this error if we couldn't do a dquot allocation inside
1356 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1357 * dirty dquots that might be cached, we just want to get rid of them
1358 * and turn quotaoff. The dquots won't be attached to any of the inodes
1359 * at this point (because we intentionally didn't in dqget_noattach).
1360 */
1361 if (error) {
1362 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1363 goto error_return;
1364 }
1365
1366 /*
1367 * If one type of quotas is off, then it will lose its
1368 * quotachecked status, since we won't be doing accounting for
1369 * that type anymore.
1370 */
1371 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1372 mp->m_qflags |= flags;
1373
1374 error_return:
1375 xfs_buf_delwri_cancel(&buffer_list);
1376
1377 if (error) {
1378 xfs_warn(mp,
1379 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1380 error);
1381 /*
1382 * We must turn off quotas.
1383 */
1384 ASSERT(mp->m_quotainfo != NULL);
1385 xfs_qm_destroy_quotainfo(mp);
1386 if (xfs_mount_reset_sbqflags(mp)) {
1387 xfs_warn(mp,
1388 "Quotacheck: Failed to reset quota flags.");
1389 }
1390 } else
1391 xfs_notice(mp, "Quotacheck: Done.");
1392 return error;
1393 }
1394
1395 /*
1396 * This is called from xfs_mountfs to start quotas and initialize all
1397 * necessary data structures like quotainfo. This is also responsible for
1398 * running a quotacheck as necessary. We are guaranteed that the superblock
1399 * is consistently read in at this point.
1400 *
1401 * If we fail here, the mount will continue with quota turned off. We don't
1402 * need to indicate success or failure at all.
1403 */
1404 void
1405 xfs_qm_mount_quotas(
1406 struct xfs_mount *mp)
1407 {
1408 int error = 0;
1409 uint sbf;
1410
1411 /*
1412 * If quotas on realtime volumes are not supported, we disable
1413 * quotas immediately.
1414 */
1415 if (mp->m_sb.sb_rextents) {
1416 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1417 mp->m_qflags = 0;
1418 goto write_changes;
1419 }
1420
1421 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1422
1423 /*
1424 * Allocate the quotainfo structure inside the mount struct, create
1425 * quotainode(s), and change/rev the superblock if necessary.
1426 */
1427 error = xfs_qm_init_quotainfo(mp);
1428 if (error) {
1429 /*
1430 * We must turn off quotas.
1431 */
1432 ASSERT(mp->m_quotainfo == NULL);
1433 mp->m_qflags = 0;
1434 goto write_changes;
1435 }
1436 /*
1437 * If any of the quotas are not consistent, do a quotacheck.
1438 */
1439 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1440 error = xfs_qm_quotacheck(mp);
1441 if (error) {
1442 /* Quotacheck failed and disabled quotas. */
1443 return;
1444 }
1445 }
1446 /*
1447 * If one type of quotas is off, then it will lose its
1448 * quotachecked status, since we won't be doing accounting for
1449 * that type anymore.
1450 */
1451 if (!XFS_IS_UQUOTA_ON(mp))
1452 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1453 if (!XFS_IS_GQUOTA_ON(mp))
1454 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1455 if (!XFS_IS_PQUOTA_ON(mp))
1456 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1457
1458 write_changes:
1459 /*
1460 * We actually don't have to acquire the m_sb_lock at all.
1461 * This can only be called from mount, and that's single threaded. XXX
1462 */
1463 spin_lock(&mp->m_sb_lock);
1464 sbf = mp->m_sb.sb_qflags;
1465 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1466 spin_unlock(&mp->m_sb_lock);
1467
1468 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1469 if (xfs_sync_sb(mp, false)) {
1470 /*
1471 * We could only have been turning quotas off.
1472 * We aren't in very good shape actually because
1473 * the incore structures are convinced that quotas are
1474 * off, but the on-disk superblock doesn't know that!
1475 */
1476 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1477 xfs_alert(mp, "%s: Superblock update failed!",
1478 __func__);
1479 }
1480 }
1481
1482 if (error) {
1483 xfs_warn(mp, "Failed to initialize disk quotas.");
1484 return;
1485 }
1486 }
1487
1488 /*
1489 * This is called after the superblock has been read in and we're ready to
1490 * iget the quota inodes.
1491 */
1492 STATIC int
1493 xfs_qm_init_quotainos(
1494 xfs_mount_t *mp)
1495 {
1496 struct xfs_inode *uip = NULL;
1497 struct xfs_inode *gip = NULL;
1498 struct xfs_inode *pip = NULL;
1499 int error;
1500 uint flags = 0;
1501
1502 ASSERT(mp->m_quotainfo);
1503
1504 /*
1505 * Get the uquota, gquota and pquota inodes.
1506 */
1507 if (xfs_sb_version_hasquota(&mp->m_sb)) {
1508 if (XFS_IS_UQUOTA_ON(mp) &&
1509 mp->m_sb.sb_uquotino != NULLFSINO) {
1510 ASSERT(mp->m_sb.sb_uquotino > 0);
1511 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1512 0, 0, &uip);
1513 if (error)
1514 return error;
1515 }
1516 if (XFS_IS_GQUOTA_ON(mp) &&
1517 mp->m_sb.sb_gquotino != NULLFSINO) {
1518 ASSERT(mp->m_sb.sb_gquotino > 0);
1519 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1520 0, 0, &gip);
1521 if (error)
1522 goto error_rele;
1523 }
1524 if (XFS_IS_PQUOTA_ON(mp) &&
1525 mp->m_sb.sb_pquotino != NULLFSINO) {
1526 ASSERT(mp->m_sb.sb_pquotino > 0);
1527 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1528 0, 0, &pip);
1529 if (error)
1530 goto error_rele;
1531 }
1532 } else {
1533 flags |= XFS_QMOPT_SBVERSION;
1534 }
1535
1536 /*
1537 * Create the three inodes, if they don't exist already. The changes
1538 * made above will get added to a transaction and logged in one of
1539 * the qino_alloc calls below. If the device is readonly,
1540 * temporarily switch to read-write to do this.
1541 */
1542 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1543 error = xfs_qm_qino_alloc(mp, &uip,
1544 flags | XFS_QMOPT_UQUOTA);
1545 if (error)
1546 goto error_rele;
1547
1548 flags &= ~XFS_QMOPT_SBVERSION;
1549 }
1550 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1551 error = xfs_qm_qino_alloc(mp, &gip,
1552 flags | XFS_QMOPT_GQUOTA);
1553 if (error)
1554 goto error_rele;
1555
1556 flags &= ~XFS_QMOPT_SBVERSION;
1557 }
1558 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1559 error = xfs_qm_qino_alloc(mp, &pip,
1560 flags | XFS_QMOPT_PQUOTA);
1561 if (error)
1562 goto error_rele;
1563 }
1564
1565 mp->m_quotainfo->qi_uquotaip = uip;
1566 mp->m_quotainfo->qi_gquotaip = gip;
1567 mp->m_quotainfo->qi_pquotaip = pip;
1568
1569 return 0;
1570
1571 error_rele:
1572 if (uip)
1573 xfs_irele(uip);
1574 if (gip)
1575 xfs_irele(gip);
1576 if (pip)
1577 xfs_irele(pip);
1578 return error;
1579 }
1580
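/*
 * Drop the quota inode references held by the quotainfo structure.
 */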
1581 STATIC void
1582 xfs_qm_destroy_quotainos(
1583 struct xfs_quotainfo *qi)
1584 {
1585 if (qi->qi_uquotaip) {
1586 xfs_irele(qi->qi_uquotaip);
1587 qi->qi_uquotaip = NULL; /* paranoia */
1588 }
1589 if (qi->qi_gquotaip) {
1590 xfs_irele(qi->qi_gquotaip);
1591 qi->qi_gquotaip = NULL;
1592 }
1593 if (qi->qi_pquotaip) {
1594 xfs_irele(qi->qi_pquotaip);
1595 qi->qi_pquotaip = NULL;
1596 }
1597 }
1598
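/*
 * Remove a dquot from the radix tree and free it; used to dispose of the
 * dquots isolated by the shrinker.
 */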
1599 STATIC void
1600 xfs_qm_dqfree_one(
1601 struct xfs_dquot *dqp)
1602 {
1603 struct xfs_mount *mp = dqp->q_mount;
1604 struct xfs_quotainfo *qi = mp->m_quotainfo;
1605
1606 mutex_lock(&qi->qi_tree_lock);
1607 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1608
1609 qi->qi_dquots--;
1610 mutex_unlock(&qi->qi_tree_lock);
1611
1612 xfs_qm_dqdestroy(dqp);
1613 }
1614
1615 /* --------------- utility functions for vnodeops ---------------- */
1616
1617
1618 /*
1619 * Given an inode, a uid, gid and prid, make sure that we have
1620 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1621 * quotas by creating this file.
1622 * This also attaches dquot(s) to the given inode after locking it,
1623 * and returns the dquots corresponding to the uid and/or gid.
1624 *
1625 * in : inode (unlocked)
1626 * out : udquot, gdquot with references taken and unlocked
1627 */
1628 int
1629 xfs_qm_vop_dqalloc(
1630 struct xfs_inode *ip,
1631 kuid_t uid,
1632 kgid_t gid,
1633 prid_t prid,
1634 uint flags,
1635 struct xfs_dquot **O_udqpp,
1636 struct xfs_dquot **O_gdqpp,
1637 struct xfs_dquot **O_pdqpp)
1638 {
1639 struct xfs_mount *mp = ip->i_mount;
1640 struct inode *inode = VFS_I(ip);
1641 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1642 struct xfs_dquot *uq = NULL;
1643 struct xfs_dquot *gq = NULL;
1644 struct xfs_dquot *pq = NULL;
1645 int error;
1646 uint lockflags;
1647
1648 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1649 return 0;
1650
1651 lockflags = XFS_ILOCK_EXCL;
1652 xfs_ilock(ip, lockflags);
1653
1654 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1655 gid = inode->i_gid;
1656
1657 /*
1658 * Attach the dquot(s) to this inode, doing a dquot allocation
1659 * if necessary. The dquot(s) will not be locked.
1660 */
1661 if (XFS_NOT_DQATTACHED(mp, ip)) {
1662 error = xfs_qm_dqattach_locked(ip, true);
1663 if (error) {
1664 xfs_iunlock(ip, lockflags);
1665 return error;
1666 }
1667 }
1668
1669 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1670 ASSERT(O_udqpp);
1671 if (!uid_eq(inode->i_uid, uid)) {
1672 /*
1673 * What we need is the dquot that has this uid, and
1674 * if we send the inode to dqget, the uid of the inode
1675 * takes priority over what's sent in the uid argument.
1676 * We must unlock inode here before calling dqget if
1677 * we're not sending the inode, because otherwise
1678 * we'll deadlock by doing trans_reserve while
1679 * holding ilock.
1680 */
1681 xfs_iunlock(ip, lockflags);
1682 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1683 XFS_DQTYPE_USER, true, &uq);
1684 if (error) {
1685 ASSERT(error != -ENOENT);
1686 return error;
1687 }
1688 /*
1689 * Get the ilock in the right order.
1690 */
1691 xfs_dqunlock(uq);
1692 lockflags = XFS_ILOCK_SHARED;
1693 xfs_ilock(ip, lockflags);
1694 } else {
1695 /*
1696 * Take an extra reference, because we'll return
1697 * this to caller
1698 */
1699 ASSERT(ip->i_udquot);
1700 uq = xfs_qm_dqhold(ip->i_udquot);
1701 }
1702 }
1703 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1704 ASSERT(O_gdqpp);
1705 if (!gid_eq(inode->i_gid, gid)) {
1706 xfs_iunlock(ip, lockflags);
1707 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1708 XFS_DQTYPE_GROUP, true, &gq);
1709 if (error) {
1710 ASSERT(error != -ENOENT);
1711 goto error_rele;
1712 }
1713 xfs_dqunlock(gq);
1714 lockflags = XFS_ILOCK_SHARED;
1715 xfs_ilock(ip, lockflags);
1716 } else {
1717 ASSERT(ip->i_gdquot);
1718 gq = xfs_qm_dqhold(ip->i_gdquot);
1719 }
1720 }
1721 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722 ASSERT(O_pdqpp);
1723 if (ip->i_projid != prid) {
1724 xfs_iunlock(ip, lockflags);
1725 error = xfs_qm_dqget(mp, prid,
1726 XFS_DQTYPE_PROJ, true, &pq);
1727 if (error) {
1728 ASSERT(error != -ENOENT);
1729 goto error_rele;
1730 }
1731 xfs_dqunlock(pq);
1732 lockflags = XFS_ILOCK_SHARED;
1733 xfs_ilock(ip, lockflags);
1734 } else {
1735 ASSERT(ip->i_pdquot);
1736 pq = xfs_qm_dqhold(ip->i_pdquot);
1737 }
1738 }
1739 trace_xfs_dquot_dqalloc(ip);
1740
1741 xfs_iunlock(ip, lockflags);
1742 if (O_udqpp)
1743 *O_udqpp = uq;
1744 else
1745 xfs_qm_dqrele(uq);
1746 if (O_gdqpp)
1747 *O_gdqpp = gq;
1748 else
1749 xfs_qm_dqrele(gq);
1750 if (O_pdqpp)
1751 *O_pdqpp = pq;
1752 else
1753 xfs_qm_dqrele(pq);
1754 return 0;
1755
1756 error_rele:
1757 xfs_qm_dqrele(gq);
1758 xfs_qm_dqrele(uq);
1759 return error;
1760 }
1761
1762 /*
1763 * Actually transfer ownership, and do dquot modifications.
1764 * These were already reserved.
1765 */
1766 struct xfs_dquot *
1767 xfs_qm_vop_chown(
1768 struct xfs_trans *tp,
1769 struct xfs_inode *ip,
1770 struct xfs_dquot **IO_olddq,
1771 struct xfs_dquot *newdq)
1772 {
1773 struct xfs_dquot *prevdq;
1774 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1775 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776
1777
1778 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780
1781 /* old dquot */
1782 prevdq = *IO_olddq;
1783 ASSERT(prevdq);
1784 ASSERT(prevdq != newdq);
1785
1786 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1787 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789 /* the sparkling new dquot */
1790 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1791 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792
1793 /*
1794 * Back when we made quota reservations for the chown, we reserved the
1795 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1796 * switched the dquots, decrease the new dquot's block reservation
1797 * (having already bumped up the real counter) so that we don't have
1798 * any reservation to give back when we commit.
1799 */
1800 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1801 -ip->i_delayed_blks);
1802
1803 /*
1804 * Give the incore reservation for delalloc blocks back to the old
1805 * dquot. We don't normally handle delalloc quota reservations
1806 * transactionally, so just lock the dquot and subtract from the
1807 * reservation. Dirty the transaction because it's too late to turn
1808 * back now.
1809 */
1810 tp->t_flags |= XFS_TRANS_DIRTY;
1811 xfs_dqlock(prevdq);
1812 ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1813 prevdq->q_blk.reserved -= ip->i_delayed_blks;
1814 xfs_dqunlock(prevdq);
1815
1816 /*
1817 * Take an extra reference, because the inode is going to keep
1818 * this dquot pointer even after the trans_commit.
1819 */
1820 *IO_olddq = xfs_qm_dqhold(newdq);
1821
1822 return prevdq;
1823 }
1824
1825 int
1826 xfs_qm_vop_rename_dqattach(
1827 struct xfs_inode **i_tab)
1828 {
1829 struct xfs_mount *mp = i_tab[0]->i_mount;
1830 int i;
1831
1832 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1833 return 0;
1834
1835 for (i = 0; (i < 4 && i_tab[i]); i++) {
1836 struct xfs_inode *ip = i_tab[i];
1837 int error;
1838
1839 /*
1840 * Watch out for duplicate entries in the table.
1841 */
1842 if (i == 0 || ip != i_tab[i-1]) {
1843 if (XFS_NOT_DQATTACHED(mp, ip)) {
1844 error = xfs_qm_dqattach(ip);
1845 if (error)
1846 return error;
1847 }
1848 }
1849 }
1850 return 0;
1851 }
1852
1853 void
1854 xfs_qm_vop_create_dqattach(
1855 struct xfs_trans *tp,
1856 struct xfs_inode *ip,
1857 struct xfs_dquot *udqp,
1858 struct xfs_dquot *gdqp,
1859 struct xfs_dquot *pdqp)
1860 {
1861 struct xfs_mount *mp = tp->t_mountp;
1862
1863 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1864 return;
1865
1866 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1867
1868 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1869 ASSERT(ip->i_udquot == NULL);
1870 ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1871
1872 ip->i_udquot = xfs_qm_dqhold(udqp);
1873 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1874 }
1875 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1876 ASSERT(ip->i_gdquot == NULL);
1877 ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1878
1879 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1880 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1881 }
1882 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1883 ASSERT(ip->i_pdquot == NULL);
1884 ASSERT(ip->i_projid == pdqp->q_id);
1885
1886 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1887 xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1888 }
1889 }
1890