fs/gfs2/quota.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means there is more contention between the nodes for
 * the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
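/*
 * A rough worked example of the "quota_scale" behaviour described above
 * (the numbers are made up): need_sync() below projects the synced value as
 * approximately
 *
 *	qb_value + qd_change * (number of journals) * quota_scale_num / quota_scale_den
 *
 * With a limit of 1000 blocks, a last-synced value of 900, a local unsynced
 * change of 20 blocks, four journals and the default 1/1 scale, the
 * projection is 900 + 20 * 4 = 980, which is below the limit, so no early
 * sync is forced.  Raising quota_scale to 2/1 gives 900 + 20 * 8 = 1060,
 * which reaches the limit and triggers a sync.
 */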

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_file.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

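/*
 * Layout of the on-disk quota file, as encoded by qd2offset() below: records
 * are sizeof(struct gfs2_quota) bytes each and interleave user and group
 * quotas per ID, so record 2 * id holds the user quota for "id" and record
 * 2 * id + 1 holds the group quota.
 */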
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}

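/*
 * Look up the gfs2_quota_data for an ID, taking a reference on it.  The list
 * is only protected by sd_quota_spin, so a new entry has to be allocated
 * outside the lock and the search retried before it can be inserted.
 */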
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}

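/*
 * Each gfs2_quota_data with outstanding changes owns a slot in the per-node
 * quota_change file.  Free slots are tracked in sd_quota_bitmap, an array of
 * sd_quota_chunks pages with one bit per slot.
 */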
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}

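/*
 * Map and pin the buffer holding this ID's gfs2_quota_change record in the
 * per-node quota_change file; the slot number determines which block is
 * needed and which record within it.
 */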
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

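/*
 * Pull the next quota entry with unsynced local changes (QDF_CHANGE set and
 * not already being synced) off the list, take the references needed to sync
 * it, and hand it back to gfs2_quota_sync().
 */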
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

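/*
 * Record a local quota change: add "change" to this ID's record in the
 * per-node quota_change file (within the current transaction) and update the
 * in-core qd_change, taking or dropping the slot and reference as the record
 * becomes non-zero or returns to zero.
 */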
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota - adjust the on-disk quota value for an ID
 *
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
					 RES_STATFS, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

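/*
 * Acquire the quota glock for an ID in shared mode.  If the cached LVB is
 * stale (or a refresh is forced), retake the glock exclusively, read the
 * record from the quota file and repopulate the LVB; if another node is
 * waiting for the lock, it is dropped and re-requested shared.
 */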
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	int error;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

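/*
 * Decide whether this ID's local changes are big enough, relative to the
 * limit, to warrant an early sync; this implements the "quota_scale"
 * projection described at the top of this file.
 */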
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

946
947static int print_message(struct gfs2_quota_data *qd, char *type)
948{
949 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
b3b94faa 950
02630a12
SW
951 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
952 sdp->sd_fsname, type,
953 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
954 qd->qd_id);
b3b94faa
DT
955
956 return 0;
957}
958
cd915493 959int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
b3b94faa 960{
feaa7bba 961 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
962 struct gfs2_alloc *al = &ip->i_alloc;
963 struct gfs2_quota_data *qd;
cd915493 964 s64 value;
b3b94faa
DT
965 unsigned int x;
966 int error = 0;
967
968 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
969 return 0;
970
971 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
972 return 0;
973
974 for (x = 0; x < al->al_qd_num; x++) {
975 qd = al->al_qd[x];
976
977 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
978 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
979 continue;
980
e9fc2aa0 981 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
b3b94faa
DT
982 spin_lock(&sdp->sd_quota_spin);
983 value += qd->qd_change;
984 spin_unlock(&sdp->sd_quota_spin);
985
cd915493 986 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
b3b94faa
DT
987 print_message(qd, "exceeded");
988 error = -EDQUOT;
989 break;
e9fc2aa0 990 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
cd915493 991 (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
b3b94faa 992 time_after_eq(jiffies, qd->qd_last_warn +
568f4c96
SW
993 gfs2_tune_get(sdp,
994 gt_quota_warn_period) * HZ)) {
b3b94faa
DT
995 error = print_message(qd, "warning");
996 qd->qd_last_warn = jiffies;
997 }
998 }
999
1000 return error;
1001}
1002
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
			found++;
		}
	}
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

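/*
 * Called at mount time: walk the per-node quota_change file, rebuild the
 * slot bitmap, and recreate an in-core gfs2_quota_data (with QDF_CHANGE set)
 * for every record that still carries a non-zero change.
 */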
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

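/*
 * Periodically prune quota data that has not been referenced for
 * gt_quota_cache_secs, releasing its LVB and freeing it.
 */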
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}