b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3a8a9a10 3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
b3b94faa
DT
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
e9fc2aa0 7 * of the GNU General Public License version 2.
b3b94faa
DT
8 */
9
10/*
11 * Quota change tags are associated with each transaction that allocates or
12 * deallocates space. Those changes are accumulated locally to each node (in a
13 * per-node file) and then are periodically synced to the quota file. This
14 * avoids the bottleneck of constantly touching the quota file, but introduces
15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on
 17 * multiple nodes to overrun their quota, but that overrun is controllable.
 18 * Since quota tags are part of transactions, there is no need for a quota check
 19 * program to be run on node crashes or anything like that.
20 *
 21 * There are a couple of knobs that let the administrator manage the quota
22 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
23 * sitting on one node before being synced to the quota file. (The default is
24 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
25 * of quota file syncs increases as the user moves closer to their limit. The
26 * more frequent the syncs, the more accurate the quota enforcement, but that
27 * means that there is more contention between the nodes for the quota file.
28 * The default value is one. This sets the maximum theoretical quota overrun
 29 * (with infinitely many nodes and infinite bandwidth) to twice the user's limit. (In
30 * practice, the maximum overrun you see should be much less.) A "quota_scale"
31 * number greater than one makes quota syncs more frequent and reduces the
32 * maximum overrun. Numbers less than one (but greater than zero) make quota
33 * syncs less frequent.
34 *
35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
36 * the quota file, so it is not being constantly read.
37 */
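/*
 * Worked example of the scaling above (illustrative numbers only, not part
 * of the algorithm itself): need_sync() below considers a pending local
 * change worth syncing once
 *
 *	lvb_value + change * journals * quota_scale_num / quota_scale_den
 *		>= limit
 *
 * So with the default scale of 1, two journals (nodes) and a pending local
 * change of 100 blocks on an ID that is still 150 blocks under its limit,
 * 100 * 2 >= 150 and the change is written back ahead of the normal
 * 60 second quota_quantum.
 */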
38
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/spinlock.h>
42#include <linux/completion.h>
43#include <linux/buffer_head.h>
b3b94faa 44#include <linux/sort.h>
18ec7d5c 45#include <linux/fs.h>
2e565bb6 46#include <linux/bio.h>
5c676f6d 47#include <linux/gfs2_ondisk.h>
7d308590 48#include <linux/lm_interface.h>
b3b94faa
DT
49
50#include "gfs2.h"
5c676f6d 51#include "incore.h"
b3b94faa
DT
52#include "bmap.h"
53#include "glock.h"
54#include "glops.h"
b3b94faa
DT
55#include "log.h"
56#include "meta_io.h"
57#include "quota.h"
58#include "rgrp.h"
59#include "super.h"
60#include "trans.h"
18ec7d5c 61#include "inode.h"
f42faf4f 62#include "ops_file.h"
18ec7d5c 63#include "ops_address.h"
5c676f6d 64#include "util.h"
b3b94faa
DT
65
66#define QUOTA_USER 1
67#define QUOTA_GROUP 0
68
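/*
 * qd2offset - byte offset of an ID's record in the on-disk quota file.
 * User and group quotas are interleaved: the user quota for id N lives at
 * slot 2N and the group quota at slot 2N + 1, each slot being
 * sizeof(struct gfs2_quota) bytes.
 */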
cd915493 69static u64 qd2offset(struct gfs2_quota_data *qd)
b3b94faa 70{
cd915493 71 u64 offset;
b3b94faa 72
cd915493 73 offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
b3b94faa
DT
74 offset *= sizeof(struct gfs2_quota);
75
76 return offset;
77}
78
cd915493 79static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
b3b94faa
DT
80 struct gfs2_quota_data **qdp)
81{
82 struct gfs2_quota_data *qd;
83 int error;
84
85 qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
86 if (!qd)
87 return -ENOMEM;
88
89 qd->qd_count = 1;
90 qd->qd_id = id;
91 if (user)
92 set_bit(QDF_USER, &qd->qd_flags);
93 qd->qd_slot = -1;
94
cd915493 95 error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
b3b94faa
DT
96 &gfs2_quota_glops, CREATE, &qd->qd_gl);
97 if (error)
98 goto fail;
99
100 error = gfs2_lvb_hold(qd->qd_gl);
101 gfs2_glock_put(qd->qd_gl);
102 if (error)
103 goto fail;
104
105 *qdp = qd;
106
107 return 0;
108
a91ea69f 109fail:
b3b94faa
DT
110 kfree(qd);
111 return error;
112}
113
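/*
 * qd_get - look up (or, with @create, build) the gfs2_quota_data for an ID.
 * The search runs under sd_quota_spin; if nothing is found, a new
 * structure is allocated outside the lock and the search is retried, so a
 * racing insert of the same ID is picked up rather than duplicated.
 */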
cd915493 114static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
b3b94faa
DT
115 struct gfs2_quota_data **qdp)
116{
117 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
118 int error, found;
119
120 *qdp = NULL;
121
122 for (;;) {
123 found = 0;
124 spin_lock(&sdp->sd_quota_spin);
125 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
126 if (qd->qd_id == id &&
127 !test_bit(QDF_USER, &qd->qd_flags) == !user) {
128 qd->qd_count++;
129 found = 1;
130 break;
131 }
132 }
133
134 if (!found)
135 qd = NULL;
136
137 if (!qd && new_qd) {
138 qd = new_qd;
139 list_add(&qd->qd_list, &sdp->sd_quota_list);
140 atomic_inc(&sdp->sd_quota_count);
141 new_qd = NULL;
142 }
143
144 spin_unlock(&sdp->sd_quota_spin);
145
146 if (qd || !create) {
147 if (new_qd) {
148 gfs2_lvb_unhold(new_qd->qd_gl);
149 kfree(new_qd);
150 }
151 *qdp = qd;
152 return 0;
153 }
154
155 error = qd_alloc(sdp, user, id, &new_qd);
156 if (error)
157 return error;
158 }
159}
160
161static void qd_hold(struct gfs2_quota_data *qd)
162{
163 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
164
165 spin_lock(&sdp->sd_quota_spin);
166 gfs2_assert(sdp, qd->qd_count);
167 qd->qd_count++;
168 spin_unlock(&sdp->sd_quota_spin);
169}
170
171static void qd_put(struct gfs2_quota_data *qd)
172{
173 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
174 spin_lock(&sdp->sd_quota_spin);
175 gfs2_assert(sdp, qd->qd_count);
176 if (!--qd->qd_count)
177 qd->qd_last_touched = jiffies;
178 spin_unlock(&sdp->sd_quota_spin);
179}
180
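/*
 * slot_get - reserve this ID's slot in the per-node quota change file.
 * Slots are tracked in sd_quota_bitmap, an array of PAGE_SIZE-byte chunks
 * (8 * PAGE_SIZE slots per chunk); the first clear bit found becomes
 * qd->qd_slot.
 */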
181static int slot_get(struct gfs2_quota_data *qd)
182{
183 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
184 unsigned int c, o = 0, b;
185 unsigned char byte = 0;
186
187 spin_lock(&sdp->sd_quota_spin);
188
189 if (qd->qd_slot_count++) {
190 spin_unlock(&sdp->sd_quota_spin);
191 return 0;
192 }
193
194 for (c = 0; c < sdp->sd_quota_chunks; c++)
195 for (o = 0; o < PAGE_SIZE; o++) {
196 byte = sdp->sd_quota_bitmap[c][o];
197 if (byte != 0xFF)
198 goto found;
199 }
200
201 goto fail;
202
a91ea69f 203found:
b3b94faa
DT
204 for (b = 0; b < 8; b++)
205 if (!(byte & (1 << b)))
206 break;
207 qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
208
209 if (qd->qd_slot >= sdp->sd_quota_slots)
210 goto fail;
211
212 sdp->sd_quota_bitmap[c][o] |= 1 << b;
213
214 spin_unlock(&sdp->sd_quota_spin);
215
216 return 0;
217
a91ea69f 218fail:
b3b94faa
DT
219 qd->qd_slot_count--;
220 spin_unlock(&sdp->sd_quota_spin);
221 return -ENOSPC;
222}
223
224static void slot_hold(struct gfs2_quota_data *qd)
225{
226 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
227
228 spin_lock(&sdp->sd_quota_spin);
229 gfs2_assert(sdp, qd->qd_slot_count);
230 qd->qd_slot_count++;
231 spin_unlock(&sdp->sd_quota_spin);
232}
233
234static void slot_put(struct gfs2_quota_data *qd)
235{
236 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
237
238 spin_lock(&sdp->sd_quota_spin);
239 gfs2_assert(sdp, qd->qd_slot_count);
240 if (!--qd->qd_slot_count) {
241 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
242 qd->qd_slot = -1;
243 }
244 spin_unlock(&sdp->sd_quota_spin);
245}
246
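/*
 * bh_get - read the quota change block backing qd->qd_slot.
 * The slot is converted to a block and offset within sd_qc_inode, the
 * block is read and type-checked, and qd->qd_bh_qc is pointed at the
 * matching struct gfs2_quota_change entry.
 */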
247static int bh_get(struct gfs2_quota_data *qd)
248{
249 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
feaa7bba 250 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
b3b94faa 251 unsigned int block, offset;
b3b94faa
DT
252 struct buffer_head *bh;
253 int error;
23591256 254 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
b3b94faa 255
f55ab26a 256 mutex_lock(&sdp->sd_quota_mutex);
b3b94faa
DT
257
258 if (qd->qd_bh_count++) {
f55ab26a 259 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
260 return 0;
261 }
262
263 block = qd->qd_slot / sdp->sd_qc_per_block;
 264 offset = qd->qd_slot % sdp->sd_qc_per_block;
265
23591256
SW
266 bh_map.b_size = 1 << ip->i_inode.i_blkbits;
267 error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
b3b94faa
DT
268 if (error)
269 goto fail;
7276b3b0 270 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
b3b94faa
DT
271 if (error)
272 goto fail;
273 error = -EIO;
274 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
275 goto fail_brelse;
276
277 qd->qd_bh = bh;
278 qd->qd_bh_qc = (struct gfs2_quota_change *)
279 (bh->b_data + sizeof(struct gfs2_meta_header) +
280 offset * sizeof(struct gfs2_quota_change));
281
2e95b665 282 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
283
284 return 0;
285
a91ea69f 286fail_brelse:
b3b94faa 287 brelse(bh);
a91ea69f 288fail:
b3b94faa 289 qd->qd_bh_count--;
f55ab26a 290 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
291 return error;
292}
293
294static void bh_put(struct gfs2_quota_data *qd)
295{
296 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
297
f55ab26a 298 mutex_lock(&sdp->sd_quota_mutex);
b3b94faa
DT
299 gfs2_assert(sdp, qd->qd_bh_count);
300 if (!--qd->qd_bh_count) {
301 brelse(qd->qd_bh);
302 qd->qd_bh = NULL;
303 qd->qd_bh_qc = NULL;
304 }
f55ab26a 305 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
306}
307
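/*
 * qd_fish - pick the next quota element that needs syncing.
 * Entries already locked for sync, with no outstanding change, or already
 * written in the current sd_quota_sync_gen are skipped; the chosen entry
 * is flagged QDF_LOCKED and its pending change is snapshotted into
 * qd_change_sync.
 */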
308static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
309{
310 struct gfs2_quota_data *qd = NULL;
311 int error;
312 int found = 0;
313
314 *qdp = NULL;
315
316 if (sdp->sd_vfs->s_flags & MS_RDONLY)
317 return 0;
318
319 spin_lock(&sdp->sd_quota_spin);
320
321 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
322 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
323 !test_bit(QDF_CHANGE, &qd->qd_flags) ||
324 qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
325 continue;
326
327 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
328
329 set_bit(QDF_LOCKED, &qd->qd_flags);
330 gfs2_assert_warn(sdp, qd->qd_count);
331 qd->qd_count++;
332 qd->qd_change_sync = qd->qd_change;
333 gfs2_assert_warn(sdp, qd->qd_slot_count);
334 qd->qd_slot_count++;
335 found = 1;
336
337 break;
338 }
339
340 if (!found)
341 qd = NULL;
342
343 spin_unlock(&sdp->sd_quota_spin);
344
345 if (qd) {
346 gfs2_assert_warn(sdp, qd->qd_change_sync);
347 error = bh_get(qd);
348 if (error) {
349 clear_bit(QDF_LOCKED, &qd->qd_flags);
350 slot_put(qd);
351 qd_put(qd);
352 return error;
353 }
354 }
355
356 *qdp = qd;
357
358 return 0;
359}
360
361static int qd_trylock(struct gfs2_quota_data *qd)
362{
363 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
364
365 if (sdp->sd_vfs->s_flags & MS_RDONLY)
366 return 0;
367
368 spin_lock(&sdp->sd_quota_spin);
369
370 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
371 !test_bit(QDF_CHANGE, &qd->qd_flags)) {
372 spin_unlock(&sdp->sd_quota_spin);
373 return 0;
374 }
375
376 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
377
378 set_bit(QDF_LOCKED, &qd->qd_flags);
379 gfs2_assert_warn(sdp, qd->qd_count);
380 qd->qd_count++;
381 qd->qd_change_sync = qd->qd_change;
382 gfs2_assert_warn(sdp, qd->qd_slot_count);
383 qd->qd_slot_count++;
384
385 spin_unlock(&sdp->sd_quota_spin);
386
387 gfs2_assert_warn(sdp, qd->qd_change_sync);
388 if (bh_get(qd)) {
389 clear_bit(QDF_LOCKED, &qd->qd_flags);
390 slot_put(qd);
391 qd_put(qd);
392 return 0;
393 }
394
395 return 1;
396}
397
398static void qd_unlock(struct gfs2_quota_data *qd)
399{
568f4c96
SW
400 gfs2_assert_warn(qd->qd_gl->gl_sbd,
401 test_bit(QDF_LOCKED, &qd->qd_flags));
b3b94faa
DT
402 clear_bit(QDF_LOCKED, &qd->qd_flags);
403 bh_put(qd);
404 slot_put(qd);
405 qd_put(qd);
406}
407
cd915493 408static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
b3b94faa
DT
409 struct gfs2_quota_data **qdp)
410{
411 int error;
412
413 error = qd_get(sdp, user, id, create, qdp);
414 if (error)
415 return error;
416
417 error = slot_get(*qdp);
418 if (error)
419 goto fail;
420
421 error = bh_get(*qdp);
422 if (error)
423 goto fail_slot;
424
425 return 0;
426
a91ea69f 427fail_slot:
b3b94faa 428 slot_put(*qdp);
a91ea69f 429fail:
b3b94faa
DT
430 qd_put(*qdp);
431 return error;
432}
433
434static void qdsb_put(struct gfs2_quota_data *qd)
435{
436 bh_put(qd);
437 slot_put(qd);
438 qd_put(qd);
439}
440
cd915493 441int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
b3b94faa 442{
feaa7bba 443 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
444 struct gfs2_alloc *al = &ip->i_alloc;
445 struct gfs2_quota_data **qd = al->al_qd;
446 int error;
447
448 if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
449 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
450 return -EIO;
451
452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
453 return 0;
454
2933f925 455 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
b3b94faa
DT
456 if (error)
457 goto out;
458 al->al_qd_num++;
459 qd++;
460
2933f925 461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
b3b94faa
DT
462 if (error)
463 goto out;
464 al->al_qd_num++;
465 qd++;
466
2933f925 467 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
b3b94faa
DT
468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
469 if (error)
470 goto out;
471 al->al_qd_num++;
472 qd++;
473 }
474
2933f925 475 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
b3b94faa
DT
476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
477 if (error)
478 goto out;
479 al->al_qd_num++;
480 qd++;
481 }
482
a91ea69f 483out:
b3b94faa
DT
484 if (error)
485 gfs2_quota_unhold(ip);
b3b94faa
DT
486 return error;
487}
488
489void gfs2_quota_unhold(struct gfs2_inode *ip)
490{
feaa7bba 491 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
492 struct gfs2_alloc *al = &ip->i_alloc;
493 unsigned int x;
494
495 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
496
497 for (x = 0; x < al->al_qd_num; x++) {
498 qdsb_put(al->al_qd[x]);
499 al->al_qd[x] = NULL;
500 }
501 al->al_qd_num = 0;
502}
503
504static int sort_qd(const void *a, const void *b)
505{
48fac179
SW
506 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
507 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
b3b94faa
DT
508
509 if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
510 !test_bit(QDF_USER, &qd_b->qd_flags)) {
511 if (test_bit(QDF_USER, &qd_a->qd_flags))
48fac179 512 return -1;
b3b94faa 513 else
48fac179 514 return 1;
b3b94faa 515 }
48fac179
SW
516 if (qd_a->qd_id < qd_b->qd_id)
517 return -1;
518 if (qd_a->qd_id > qd_b->qd_id)
519 return 1;
b3b94faa 520
48fac179 521 return 0;
b3b94faa
DT
522}
523
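/*
 * do_qc - fold a local allocation change into this node's quota change file.
 * The gfs2_quota_change entry is updated in a journaled buffer; it is
 * initialized the first time an ID becomes dirty, and its qd and slot
 * references are dropped again once the accumulated change returns to zero.
 */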
cd915493 524static void do_qc(struct gfs2_quota_data *qd, s64 change)
b3b94faa
DT
525{
526 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
feaa7bba 527 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
b3b94faa 528 struct gfs2_quota_change *qc = qd->qd_bh_qc;
cd915493 529 s64 x;
b3b94faa 530
f55ab26a 531 mutex_lock(&sdp->sd_quota_mutex);
d4e9c4c3 532 gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
b3b94faa
DT
533
534 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
535 qc->qc_change = 0;
536 qc->qc_flags = 0;
537 if (test_bit(QDF_USER, &qd->qd_flags))
538 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
539 qc->qc_id = cpu_to_be32(qd->qd_id);
540 }
541
b44b84d7 542 x = be64_to_cpu(qc->qc_change) + change;
b3b94faa
DT
543 qc->qc_change = cpu_to_be64(x);
544
545 spin_lock(&sdp->sd_quota_spin);
546 qd->qd_change = x;
547 spin_unlock(&sdp->sd_quota_spin);
548
549 if (!x) {
550 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
551 clear_bit(QDF_CHANGE, &qd->qd_flags);
552 qc->qc_flags = 0;
553 qc->qc_id = 0;
554 slot_put(qd);
555 qd_put(qd);
556 } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
557 qd_hold(qd);
558 slot_hold(qd);
559 }
907b9bce 560
f55ab26a 561 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
562}
563
18ec7d5c
SW
 564/**
 565 * gfs2_adjust_quota - adjust an ID's value in the on-disk quota file by
 566 *                     @change and mirror the new total into the glock LVB
 567 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 568 * in turn mostly borrowed from ext3.
 569 */
570static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
cd915493 571 s64 change, struct gfs2_quota_data *qd)
18ec7d5c 572{
feaa7bba 573 struct inode *inode = &ip->i_inode;
18ec7d5c
SW
574 struct address_space *mapping = inode->i_mapping;
575 unsigned long index = loc >> PAGE_CACHE_SHIFT;
1990e917 576 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
18ec7d5c
SW
577 unsigned blocksize, iblock, pos;
578 struct buffer_head *bh;
579 struct page *page;
580 void *kaddr;
1990e917
AD
581 char *ptr;
582 struct gfs2_quota_host qp;
e9fc2aa0 583 s64 value;
18ec7d5c
SW
584 int err = -EIO;
585
586 page = grab_cache_page(mapping, index);
587 if (!page)
588 return -ENOMEM;
589
590 blocksize = inode->i_sb->s_blocksize;
591 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
592
593 if (!page_has_buffers(page))
594 create_empty_buffers(page, blocksize, 0);
595
596 bh = page_buffers(page);
597 pos = blocksize;
598 while (offset >= pos) {
599 bh = bh->b_this_page;
600 iblock++;
601 pos += blocksize;
602 }
603
604 if (!buffer_mapped(bh)) {
605 gfs2_get_block(inode, iblock, bh, 1);
606 if (!buffer_mapped(bh))
607 goto unlock;
608 }
609
610 if (PageUptodate(page))
611 set_buffer_uptodate(bh);
612
613 if (!buffer_uptodate(bh)) {
2e565bb6 614 ll_rw_block(READ_META, 1, &bh);
18ec7d5c
SW
615 wait_on_buffer(bh);
616 if (!buffer_uptodate(bh))
617 goto unlock;
618 }
619
620 gfs2_trans_add_bh(ip->i_gl, bh, 0);
621
622 kaddr = kmap_atomic(page, KM_USER0);
48fac179 623 ptr = kaddr + offset;
1990e917
AD
624 gfs2_quota_in(&qp, ptr);
625 qp.qu_value += change;
626 value = qp.qu_value;
627 gfs2_quota_out(&qp, ptr);
18ec7d5c
SW
628 flush_dcache_page(page);
629 kunmap_atomic(kaddr, KM_USER0);
630 err = 0;
631 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
18ec7d5c 632 qd->qd_qb.qb_value = cpu_to_be64(value);
2a87ab08
AD
633 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
634 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
18ec7d5c
SW
635unlock:
636 unlock_page(page);
637 page_cache_release(page);
638 return err;
639}
640
b3b94faa
DT
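/*
 * do_sync - write a batch of accumulated local changes into the quota file.
 * Every affected quota glock is taken exclusively along with the quota
 * inode, one transaction covers the whole batch, gfs2_adjust_quota()
 * rewrites each on-disk value, and the local entries are credited back
 * with do_qc(qd, -qd->qd_change_sync).
 */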
641static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
642{
643 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
feaa7bba 644 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
b3b94faa
DT
645 unsigned int data_blocks, ind_blocks;
646 struct gfs2_holder *ghs, i_gh;
647 unsigned int qx, x;
648 struct gfs2_quota_data *qd;
f42faf4f 649 loff_t offset;
b3b94faa
DT
650 unsigned int nalloc = 0;
651 struct gfs2_alloc *al = NULL;
652 int error;
653
654 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
655 &data_blocks, &ind_blocks);
656
657 ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
658 if (!ghs)
659 return -ENOMEM;
660
661 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
662 for (qx = 0; qx < num_qd; qx++) {
663 error = gfs2_glock_nq_init(qda[qx]->qd_gl,
664 LM_ST_EXCLUSIVE,
665 GL_NOCACHE, &ghs[qx]);
666 if (error)
667 goto out;
668 }
669
670 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
671 if (error)
672 goto out;
673
674 for (x = 0; x < num_qd; x++) {
675 int alloc_required;
676
677 offset = qd2offset(qda[x]);
678 error = gfs2_write_alloc_required(ip, offset,
679 sizeof(struct gfs2_quota),
680 &alloc_required);
681 if (error)
682 goto out_gunlock;
683 if (alloc_required)
684 nalloc++;
685 }
686
687 if (nalloc) {
688 al = gfs2_alloc_get(ip);
689
690 al->al_requested = nalloc * (data_blocks + ind_blocks);
691
692 error = gfs2_inplace_reserve(ip);
693 if (error)
694 goto out_alloc;
695
696 error = gfs2_trans_begin(sdp,
697 al->al_rgd->rd_ri.ri_length +
698 num_qd * data_blocks +
699 nalloc * ind_blocks +
700 RES_DINODE + num_qd +
701 RES_STATFS, 0);
702 if (error)
703 goto out_ipres;
704 } else {
705 error = gfs2_trans_begin(sdp,
706 num_qd * data_blocks +
707 RES_DINODE + num_qd, 0);
708 if (error)
709 goto out_gunlock;
710 }
711
712 for (x = 0; x < num_qd; x++) {
b3b94faa
DT
713 qd = qda[x];
714 offset = qd2offset(qd);
18ec7d5c 715 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
568f4c96 716 (struct gfs2_quota_data *)
2a87ab08 717 qd);
18ec7d5c 718 if (error)
b3b94faa 719 goto out_end_trans;
b3b94faa
DT
720
721 do_qc(qd, -qd->qd_change_sync);
b3b94faa
DT
722 }
723
724 error = 0;
725
a91ea69f 726out_end_trans:
b3b94faa 727 gfs2_trans_end(sdp);
a91ea69f 728out_ipres:
b3b94faa
DT
729 if (nalloc)
730 gfs2_inplace_release(ip);
a91ea69f 731out_alloc:
b3b94faa
DT
732 if (nalloc)
733 gfs2_alloc_put(ip);
a91ea69f 734out_gunlock:
b3b94faa 735 gfs2_glock_dq_uninit(&i_gh);
a91ea69f 736out:
b3b94faa
DT
737 while (qx--)
738 gfs2_glock_dq_uninit(&ghs[qx]);
739 kfree(ghs);
b09e593d 740 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
b3b94faa
DT
741 return error;
742}
743
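/*
 * do_glock - acquire an ID's quota glock with a usable Lock Value Block.
 * Normally the cached LVB is enough; when a refresh is forced or the LVB
 * has never been initialized, the lock is retaken exclusively and the
 * limit, warn and value fields are re-read from the quota file.
 */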
744static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
745 struct gfs2_holder *q_gh)
746{
747 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
feaa7bba 748 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
b3b94faa 749 struct gfs2_holder i_gh;
b5bc9e8b 750 struct gfs2_quota_host q;
b3b94faa 751 char buf[sizeof(struct gfs2_quota)];
f42faf4f 752 struct file_ra_state ra_state;
b3b94faa 753 int error;
e9fc2aa0 754 struct gfs2_quota_lvb *qlvb;
b3b94faa 755
f42faf4f 756 file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
a91ea69f 757restart:
b3b94faa
DT
758 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
759 if (error)
760 return error;
761
e9fc2aa0 762 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
b3b94faa 763
e9fc2aa0 764 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
f42faf4f 765 loff_t pos;
b3b94faa
DT
766 gfs2_glock_dq_uninit(q_gh);
767 error = gfs2_glock_nq_init(qd->qd_gl,
768 LM_ST_EXCLUSIVE, GL_NOCACHE,
769 q_gh);
770 if (error)
771 return error;
772
e9fc2aa0 773 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
b3b94faa
DT
774 if (error)
775 goto fail;
776
777 memset(buf, 0, sizeof(struct gfs2_quota));
f42faf4f 778 pos = qd2offset(qd);
0d42e542
SW
779 error = gfs2_internal_read(ip, &ra_state, buf,
780 &pos, sizeof(struct gfs2_quota));
b3b94faa
DT
781 if (error < 0)
782 goto fail_gunlock;
783
784 gfs2_glock_dq_uninit(&i_gh);
785
907b9bce 786
b3b94faa 787 gfs2_quota_in(&q, buf);
e9fc2aa0
SW
788 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
789 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
790 qlvb->__pad = 0;
791 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
792 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
793 qlvb->qb_value = cpu_to_be64(q.qu_value);
794 qd->qd_qb = *qlvb;
b3b94faa
DT
795
796 if (gfs2_glock_is_blocking(qd->qd_gl)) {
797 gfs2_glock_dq_uninit(q_gh);
798 force_refresh = 0;
799 goto restart;
800 }
801 }
802
803 return 0;
804
a91ea69f 805fail_gunlock:
b3b94faa 806 gfs2_glock_dq_uninit(&i_gh);
a91ea69f 807fail:
b3b94faa 808 gfs2_glock_dq_uninit(q_gh);
b3b94faa
DT
809 return error;
810}
811
cd915493 812int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
b3b94faa 813{
feaa7bba 814 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
815 struct gfs2_alloc *al = &ip->i_alloc;
816 unsigned int x;
817 int error = 0;
818
819 gfs2_quota_hold(ip, uid, gid);
820
821 if (capable(CAP_SYS_RESOURCE) ||
822 sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
823 return 0;
824
825 sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
826 sort_qd, NULL);
827
828 for (x = 0; x < al->al_qd_num; x++) {
829 error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
830 if (error)
831 break;
832 }
833
834 if (!error)
835 set_bit(GIF_QD_LOCKED, &ip->i_flags);
836 else {
837 while (x--)
838 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
839 gfs2_quota_unhold(ip);
840 }
841
842 return error;
843}
844
845static int need_sync(struct gfs2_quota_data *qd)
846{
847 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
848 struct gfs2_tune *gt = &sdp->sd_tune;
cd915493 849 s64 value;
b3b94faa
DT
850 unsigned int num, den;
851 int do_sync = 1;
852
853 if (!qd->qd_qb.qb_limit)
854 return 0;
855
856 spin_lock(&sdp->sd_quota_spin);
857 value = qd->qd_change;
858 spin_unlock(&sdp->sd_quota_spin);
859
860 spin_lock(&gt->gt_spin);
861 num = gt->gt_quota_scale_num;
862 den = gt->gt_quota_scale_den;
863 spin_unlock(&gt->gt_spin);
864
865 if (value < 0)
866 do_sync = 0;
e9fc2aa0
SW
867 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
868 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
b3b94faa
DT
869 do_sync = 0;
870 else {
871 value *= gfs2_jindex_size(sdp) * num;
872 do_div(value, den);
e9fc2aa0 873 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
cd915493 874 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
b3b94faa
DT
875 do_sync = 0;
876 }
877
878 return do_sync;
879}
880
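/*
 * gfs2_quota_unlock - release the quota glocks taken by gfs2_quota_lock().
 * Before each ID is dropped, need_sync() decides whether its accumulated
 * local change is large enough to be pushed to the quota file via
 * do_sync() right away.
 */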
881void gfs2_quota_unlock(struct gfs2_inode *ip)
882{
883 struct gfs2_alloc *al = &ip->i_alloc;
884 struct gfs2_quota_data *qda[4];
885 unsigned int count = 0;
886 unsigned int x;
887
888 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
889 goto out;
890
891 for (x = 0; x < al->al_qd_num; x++) {
892 struct gfs2_quota_data *qd;
893 int sync;
894
895 qd = al->al_qd[x];
896 sync = need_sync(qd);
897
898 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
899
900 if (sync && qd_trylock(qd))
901 qda[count++] = qd;
902 }
903
904 if (count) {
905 do_sync(count, qda);
906 for (x = 0; x < count; x++)
907 qd_unlock(qda[x]);
908 }
909
a91ea69f 910out:
b3b94faa
DT
911 gfs2_quota_unhold(ip);
912}
913
914#define MAX_LINE 256
915
916static int print_message(struct gfs2_quota_data *qd, char *type)
917{
918 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
b3b94faa 919
02630a12
SW
 920 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
921 sdp->sd_fsname, type,
922 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
923 qd->qd_id);
b3b94faa
DT
924
925 return 0;
926}
927
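/*
 * gfs2_quota_check - test the held quotas for @uid and @gid against their
 * limits.  The cached LVB value plus the pending local change is compared
 * with qb_limit (hard limit: returns -EDQUOT) and qb_warn (soft limit:
 * prints a warning, rate limited by gt_quota_warn_period).
 */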
cd915493 928int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
b3b94faa 929{
feaa7bba 930 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
931 struct gfs2_alloc *al = &ip->i_alloc;
932 struct gfs2_quota_data *qd;
cd915493 933 s64 value;
b3b94faa
DT
934 unsigned int x;
935 int error = 0;
936
937 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
938 return 0;
939
940 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
941 return 0;
942
943 for (x = 0; x < al->al_qd_num; x++) {
944 qd = al->al_qd[x];
945
946 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
947 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
948 continue;
949
e9fc2aa0 950 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
b3b94faa
DT
951 spin_lock(&sdp->sd_quota_spin);
952 value += qd->qd_change;
953 spin_unlock(&sdp->sd_quota_spin);
954
cd915493 955 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
b3b94faa
DT
956 print_message(qd, "exceeded");
957 error = -EDQUOT;
958 break;
e9fc2aa0 959 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
cd915493 960 (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
b3b94faa 961 time_after_eq(jiffies, qd->qd_last_warn +
568f4c96
SW
962 gfs2_tune_get(sdp,
963 gt_quota_warn_period) * HZ)) {
b3b94faa
DT
964 error = print_message(qd, "warning");
965 qd->qd_last_warn = jiffies;
966 }
967 }
968
969 return error;
970}
971
cd915493
SW
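/*
 * gfs2_quota_change - account @change blocks against the held user and
 * group quotas matching @uid and @gid.  System files (GFS2_DIF_SYSTEM)
 * are exempt.
 */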
972void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
973 u32 uid, u32 gid)
b3b94faa
DT
974{
975 struct gfs2_alloc *al = &ip->i_alloc;
976 struct gfs2_quota_data *qd;
977 unsigned int x;
978 unsigned int found = 0;
979
feaa7bba 980 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
b3b94faa
DT
981 return;
982 if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
983 return;
984
985 for (x = 0; x < al->al_qd_num; x++) {
986 qd = al->al_qd[x];
987
988 if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
989 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
990 do_qc(qd, change);
991 found++;
992 }
993 }
994}
995
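/*
 * gfs2_quota_sync - write all pending quota changes to the quota file.
 * Dirty entries are collected in batches of at most gt_quota_simul_sync
 * and pushed through do_sync() until nothing is left for this
 * sd_quota_sync_gen.
 */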
996int gfs2_quota_sync(struct gfs2_sbd *sdp)
997{
998 struct gfs2_quota_data **qda;
999 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1000 unsigned int num_qd;
1001 unsigned int x;
1002 int error = 0;
1003
1004 sdp->sd_quota_sync_gen++;
1005
1006 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1007 if (!qda)
1008 return -ENOMEM;
1009
1010 do {
1011 num_qd = 0;
1012
1013 for (;;) {
1014 error = qd_fish(sdp, qda + num_qd);
1015 if (error || !qda[num_qd])
1016 break;
1017 if (++num_qd == max_qd)
1018 break;
1019 }
1020
1021 if (num_qd) {
1022 if (!error)
1023 error = do_sync(num_qd, qda);
1024 if (!error)
1025 for (x = 0; x < num_qd; x++)
1026 qda[x]->qd_sync_gen =
1027 sdp->sd_quota_sync_gen;
1028
1029 for (x = 0; x < num_qd; x++)
1030 qd_unlock(qda[x]);
1031 }
1032 } while (!error && num_qd == max_qd);
1033
1034 kfree(qda);
1035
1036 return error;
1037}
1038
cd915493 1039int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
b3b94faa
DT
1040{
1041 struct gfs2_quota_data *qd;
1042 struct gfs2_holder q_gh;
1043 int error;
1044
1045 error = qd_get(sdp, user, id, CREATE, &qd);
1046 if (error)
1047 return error;
1048
1049 error = do_glock(qd, FORCE, &q_gh);
1050 if (!error)
1051 gfs2_glock_dq_uninit(&q_gh);
1052
1053 qd_put(qd);
1054
1055 return error;
1056}
1057
b3b94faa
DT
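/*
 * gfs2_quota_init - rebuild in-core quota state at mount time.
 * The per-node quota change file is walked block by block; every entry
 * with a non-zero qc_change gets a gfs2_quota_data, its slot marked in
 * sd_quota_bitmap and a place on sd_quota_list.
 */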
1058int gfs2_quota_init(struct gfs2_sbd *sdp)
1059{
feaa7bba 1060 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
b3b94faa
DT
1061 unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
1062 unsigned int x, slot = 0;
1063 unsigned int found = 0;
cd915493
SW
1064 u64 dblock;
1065 u32 extlen = 0;
b3b94faa
DT
1066 int error;
1067
7276b3b0 1068 if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
b3b94faa
DT
1069 ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
1070 gfs2_consist_inode(ip);
907b9bce 1071 return -EIO;
b3b94faa
DT
1072 }
1073 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
5c676f6d 1074 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
b3b94faa
DT
1075
1076 error = -ENOMEM;
1077
1078 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1079 sizeof(unsigned char *), GFP_KERNEL);
1080 if (!sdp->sd_quota_bitmap)
1081 return error;
1082
1083 for (x = 0; x < sdp->sd_quota_chunks; x++) {
1084 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
1085 if (!sdp->sd_quota_bitmap[x])
1086 goto fail;
1087 }
1088
1089 for (x = 0; x < blocks; x++) {
1090 struct buffer_head *bh;
1091 unsigned int y;
1092
1093 if (!extlen) {
1094 int new = 0;
feaa7bba 1095 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
b3b94faa
DT
1096 if (error)
1097 goto fail;
1098 }
b3b94faa 1099 error = -EIO;
7276b3b0
SW
1100 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1101 if (!bh)
1102 goto fail;
b3b94faa
DT
1103 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1104 brelse(bh);
1105 goto fail;
1106 }
1107
7276b3b0 1108 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
b3b94faa 1109 y++, slot++) {
b62f963e 1110 struct gfs2_quota_change_host qc;
b3b94faa
DT
1111 struct gfs2_quota_data *qd;
1112
1113 gfs2_quota_change_in(&qc, bh->b_data +
1114 sizeof(struct gfs2_meta_header) +
1115 y * sizeof(struct gfs2_quota_change));
1116 if (!qc.qc_change)
1117 continue;
1118
1119 error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1120 qc.qc_id, &qd);
1121 if (error) {
1122 brelse(bh);
1123 goto fail;
1124 }
1125
1126 set_bit(QDF_CHANGE, &qd->qd_flags);
1127 qd->qd_change = qc.qc_change;
1128 qd->qd_slot = slot;
1129 qd->qd_slot_count = 1;
1130 qd->qd_last_touched = jiffies;
1131
1132 spin_lock(&sdp->sd_quota_spin);
1133 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1134 list_add(&qd->qd_list, &sdp->sd_quota_list);
1135 atomic_inc(&sdp->sd_quota_count);
1136 spin_unlock(&sdp->sd_quota_spin);
1137
1138 found++;
1139 }
1140
1141 brelse(bh);
1142 dblock++;
1143 extlen--;
1144 }
1145
1146 if (found)
1147 fs_info(sdp, "found %u quota changes\n", found);
1148
1149 return 0;
1150
a91ea69f 1151fail:
b3b94faa
DT
1152 gfs2_quota_cleanup(sdp);
1153 return error;
1154}
1155
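/*
 * gfs2_quota_scan - reap quota structures that have fallen out of use.
 * Entries with no holders that have not been touched for
 * gt_quota_cache_secs seconds are unlinked and freed.
 */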
1156void gfs2_quota_scan(struct gfs2_sbd *sdp)
1157{
1158 struct gfs2_quota_data *qd, *safe;
1159 LIST_HEAD(dead);
1160
1161 spin_lock(&sdp->sd_quota_spin);
1162 list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
1163 if (!qd->qd_count &&
1164 time_after_eq(jiffies, qd->qd_last_touched +
1165 gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
1166 list_move(&qd->qd_list, &dead);
1167 gfs2_assert_warn(sdp,
1168 atomic_read(&sdp->sd_quota_count) > 0);
1169 atomic_dec(&sdp->sd_quota_count);
1170 }
1171 }
1172 spin_unlock(&sdp->sd_quota_spin);
1173
1174 while (!list_empty(&dead)) {
1175 qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
1176 list_del(&qd->qd_list);
1177
1178 gfs2_assert_warn(sdp, !qd->qd_change);
1179 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1180 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1181
1182 gfs2_lvb_unhold(qd->qd_gl);
1183 kfree(qd);
1184 }
1185}
1186
1187void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1188{
1189 struct list_head *head = &sdp->sd_quota_list;
1190 struct gfs2_quota_data *qd;
1191 unsigned int x;
1192
1193 spin_lock(&sdp->sd_quota_spin);
1194 while (!list_empty(head)) {
1195 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1196
1197 if (qd->qd_count > 1 ||
1198 (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1199 list_move(&qd->qd_list, head);
1200 spin_unlock(&sdp->sd_quota_spin);
1201 schedule();
1202 spin_lock(&sdp->sd_quota_spin);
1203 continue;
1204 }
1205
1206 list_del(&qd->qd_list);
1207 atomic_dec(&sdp->sd_quota_count);
1208 spin_unlock(&sdp->sd_quota_spin);
1209
1210 if (!qd->qd_count) {
1211 gfs2_assert_warn(sdp, !qd->qd_change);
1212 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1213 } else
1214 gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1215 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1216
1217 gfs2_lvb_unhold(qd->qd_gl);
1218 kfree(qd);
1219
1220 spin_lock(&sdp->sd_quota_spin);
1221 }
1222 spin_unlock(&sdp->sd_quota_spin);
1223
1224 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1225
1226 if (sdp->sd_quota_bitmap) {
1227 for (x = 0; x < sdp->sd_quota_chunks; x++)
1228 kfree(sdp->sd_quota_bitmap[x]);
1229 kfree(sdp->sd_quota_bitmap);
1230 }
1231}
1232