// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed. The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the brand-new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
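			/*
			 * Illustrative example (numbers added for clarity,
			 * not from the original source): if the old
			 * transaction reserved 10 blocks (qt_blk_res == 10)
			 * and has consumed 4 of them (qt_bcount_delta == 4),
			 * the new transaction inherits the 6 unused blocks
			 * and the old one keeps only the 4 it actually used.
			 */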
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
					   oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
	}
}

/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	if (XFS_QM_ISUDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
	else if (XFS_QM_ISGDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
	else if (XFS_QM_ISPDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
	else
		return NULL;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {

	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/*
	 * inode reservation
	 */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/*
	 * disk blocks used.
	 */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/*
	 * Inode Count
	 */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/*
	 * rtblk reservation
	 */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/*
	 * rtblk count
	 */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified. We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
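		/*
		 * Worked example for the normal case (numbers added for
		 * illustration only): with a transaction reservation of 8
		 * blocks (reserved == 8) of which 5 were actually allocated
		 * (res_used == 5), the 3 unused blocks are released from the
		 * dquot reservation below.
		 */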
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
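			/*
			 * For illustration (numbers not from the original
			 * source): if this transaction allocated 5 blocks
			 * against its own reservation (qt_bcount_delta == 5)
			 * and 3 more were delalloc blocks reserved outside
			 * any transaction (qt_delbcnt_delta == 3), then
			 * q_blk.count grows by the combined total of 8 below.
			 */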
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
	}
}

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	if (dqp->dq_flags & XFS_DQ_PROJ)
		qtype = PRJQUOTA;
	else if (dqp->dq_flags & XFS_DQ_USER)
		qtype = USRQUOTA;
	else
		qtype = GRPQUOTA;

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3, since it's userspace ABI now, and that this
 * function never decreases the quota reservation, so the *BELOW messages are
 * irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);
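	/*
	 * These build-time checks pin the "+3" offset between the inode and
	 * block warning codes: QUOTA_NL_IHARDWARN + 3 == QUOTA_NL_BHARDWARN,
	 * and so on. That is what lets xfs_trans_dqresv() report a block
	 * warning by simply adding 3 to the inode code returned from here.
	 */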

	*fatal = false;
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		if ((res->timer != 0 && now > res->timer) ||
		    (res->warnings != 0 && res->warnings >= qlim->warn)) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
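	/*
	 * Illustrative example: reserving nblks == 16 bumps q_blk.reserved by
	 * 16 while q_blk.count is untouched; the count only moves once the
	 * blocks are actually allocated and the usage deltas are applied at
	 * commit time by xfs_trans_apply_dquot_deltas().
	 */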
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					flags & XFS_QMOPT_RESBLK_MASK,
					nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					XFS_TRANS_DQ_RES_INOS,
					ninos);
	}
	ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
	ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
	ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (XFS_QM_ISPDQ(dqp))
		return -ENOSPC;
	return -EDQUOT;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
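/*
 * Note on the error path below: if a later reservation in the sequence fails,
 * the earlier ones are backed out with negative counts and XFS_QMOPT_FORCE_RES
 * set, so the unwind itself can never be rejected by quota enforcement.
 */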
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}


/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed. The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}