/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author: Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *	Revised list management to avoid races
 *	-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *	Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *	As a consequence the locking was moved from dquot_decr_...(),
 *	dquot_incr_...() to calling functions.
 *	invalidate_dquots() now writes modified dquots.
 *	Serialized quota_off() and quota_on() for mount point.
 *	Fixed a few bugs in grow_dquots().
 *	Fixed deadlock in write_dquot() - we no longer account quotas on
 *	quota files
 *	remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *	add_dquot_ref() restarts after blocking
 *	Added check for bogus uid and fixed check for group in quotactl.
 *	Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *	Used struct list_head instead of own list struct
 *	Invalidation of referenced dquots is no longer possible
 *	Improved free_dquots list management
 *	Quota and i_blocks are now updated in one place to avoid races
 *	Warnings are now delayed so we won't block in critical section
 *	Write updated not to require dquot lock
 *	Jan Kara, <jack@suse.cz>, 9/2000
 *
 *	Added dynamic quota structure allocation
 *	Jan Kara <jack@suse.cz> 12/2000
 *
 *	Rewritten quota interface. Implemented new quota format and
 *	formats registering.
 *	Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *	New SMP locking.
 *	Jan Kara, <jack@suse.cz>, 10/2002
 *
 *	Added journalled quota support, fix lock inversion problems
 *	Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include "../internal.h" /* ugh */

#include <asm/uaccess.h>

/*
 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats.
 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * The i_blocks and i_bytes updates themselves are guarded by i_lock acquired
 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
 * modifications of quota state (on quotaon and quotaoff) and readers who care
 * about latest values take it as well.
 *
 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock
 *
 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
 * operation is just reading pointers from inode (or not using them at all) the
 * read lock is enough. If pointers are altered function must hold write lock.
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dqptr_sem. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently dquot is locked only when it is being read to memory (or space for
 * it is being allocated) on the first dqget() and when it is being released on
 * the last dqput(). The allocation and release operations are serialized by
 * the dq_lock and by checking the use count in dquot_release(). Write
 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
 * spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
 *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
 *   dqio_mutex
 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
 * dqptr_sem. But the filesystem has to take into account that functions such
 * as dquot_alloc_space() acquire dqptr_sem and usually have to be called
 * from inside a transaction to keep filesystem consistency after a crash. Also
 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
 * called with dqptr_sem held.
 * i_mutex on quota files is special (it's below dqio_mutex)
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

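/*
 * Find a quota format with the given id in the list of registered formats.
 * If none is loaded, try to load the module that provides it. On success a
 * reference to the format's owner module is held; drop it with
 * put_quota_format().
 */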
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);

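/* Hash a (superblock, id, type) triple into an index into dquot_hash[] */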
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
{
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				unsigned int id, int type)
{
	struct hlist_node *node;
	struct dquot *dquot;

	hlist_for_each (node, dquot_hash+hashent) {
		dquot = hlist_entry(node, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && dquot->dq_id == id &&
		    dquot->dq_type == type)
			return dquot;
	}
	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats_dec(DQST_FREE_DQUOTS);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

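/*
 * Wait until the dquot is unlocked by taking and immediately dropping its
 * dq_lock; used by dqget() to wait for a concurrent read-in or release.
 */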
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

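/* Dirty the dquot via the filesystem's ->mark_dirty callback */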
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
					dquot->dq_sb, dquot->dq_type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* A dquot can be inactive only if there was an error during read/init
	 * => we had better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
					dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			atomic_inc(&dquot->dq_count);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
		dqput(old_dquot);
		old_dquot = dquot;
		ret = fn(dquot, priv);
		if (ret < 0)
			goto out;
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

int dquot_quota_sync(struct super_block *sb, int type, int wait)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Only a bad dquot can be dirty and inactive... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			sb->dq_op->write_dquot(dquot);
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);
	mutex_unlock(&dqopt->dqonoff_mutex);

	if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
		return 0;

	/* This is not very clever (nor fast) but currently I don't know of
	 * any other simple way of getting quota data to disk, and we must get
	 * it there for it to be visible to userspace... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
				  I_MUTEX_QUOTA);
		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
	}
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

/* Free unused dquots from cache */
static void prune_dqcache(int count)
{
	struct list_head *head;
	struct dquot *dquot;

	head = free_dquots.prev;
	while (head != &free_dquots && count) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		count--;
		head = free_dquots.prev;
	}
}

/*
 * This is called from kswapd when we think we need some
 * more memory
 */
static int shrink_dqcache_memory(struct shrinker *shrink,
				 struct shrink_control *sc)
{
	int nr = sc->nr_to_scan;

	if (nr) {
		spin_lock(&dq_list_lock);
		prune_dqcache(nr);
		spin_unlock(&dq_list_lock);
	}
	return ((unsigned)
		percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
		/100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dqcache_shrinker = {
	.shrink = shrink_dqcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_type], dquot->dq_id);
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

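/* Allocate a fresh dquot via ->alloc_dquot and initialize it; the caller
 * gets the single reference */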
static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	init_waitqueue_head(&dquot->dq_wait_unused);
	dquot->dq_sb = sb;
	dquot->dq_type = type;
	atomic_set(&dquot->dq_count, 1);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot = NULL, *empty = NULL;

	if (!sb_has_quota_active(sb, type))
		return NULL;
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, id, type);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
	}
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);

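/* Does the inode still lack dquot pointers of the given type (any type when
 * type == -1)? */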
static int dqinit_needed(struct inode *inode, int type)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;
	if (type != -1)
		return !inode->i_dquot[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!inode->i_dquot[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by dqonoff_mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		iput(old_inode);
		__dquot_initialize(inode, type);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);

#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
}


/*
 * Return 0 if dqput() won't block.
 * (note that 1 doesn't necessarily mean blocking)
 */
static inline int dqput_blocks(struct dquot *dquot)
{
	if (atomic_read(&dquot->dq_count) <= 1)
		return 1;
	return 0;
}

/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 * We can't race with anybody because we hold dqptr_sem for writing...
 */
static int remove_inode_dquot_ref(struct inode *inode, int type,
				  struct list_head *tofree_head)
{
	struct dquot *dquot = inode->i_dquot[type];

	inode->i_dquot[type] = NULL;
	if (dquot) {
		if (dqput_blocks(dquot)) {
#ifdef CONFIG_QUOTA_DEBUG
			if (atomic_read(&dquot->dq_count) != 1)
				quota_error(inode->i_sb, "Adding dquot with "
					    "dq_count %d to dispose list",
					    atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			/* As the dquot currently has users, it can't be on
			 * the free list... */
			list_add(&dquot->dq_free, tofree_head);
			spin_unlock(&dq_list_lock);
			return 1;
		}
		else
			dqput(dquot);	/* We have guaranteed we won't block */
	}
	return 0;
}

/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be taken, so we
 * are the only ones holding a reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}

static void remove_dquot_ref(struct super_block *sb, int type,
			     struct list_head *tofree_head)
{
	struct inode *inode;
	int reserved = 0;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dqptr_sem).
		 */
		if (!IS_NOQUOTA(inode)) {
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
	}
	spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		down_write(&sb_dqopt(sb)->dqptr_sem);
		remove_dquot_ref(sb, type, &tofree_head);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		put_dquot_list(&tofree_head);
	}
}

static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}

static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}

/*
 * Claim reserved quota space
 */
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace < number) {
		WARN_ON_ONCE(1);
		number = dquot->dq_dqb.dqb_rsvspace;
	}
	dquot->dq_dqb.dqb_curspace += number;
	dquot->dq_dqb.dqb_rsvspace -= number;
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

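/*
 * Atomically test and set the warning flag matching warntype; returns the
 * old value so a given warning is issued only once until the flag is
 * cleared again by the dquot_decr_*() helpers above.
 */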
static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot *dquot)
{
	if (!flag_print_warnings)
		return 0;

	switch (dquot->dq_type) {
	case USRQUOTA:
		return current_fsuid() == dquot->dq_id;
	case GRPQUOTA:
		return in_group_p(dquot->dq_id);
	}
	return 0;
}

/* Print a warning to the user who exceeded quota */
static void print_warning(struct dquot *dquot, const int warntype)
{
	char *msg = NULL;
	struct tty_struct *tty;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, dquot->dq_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[dquot->dq_type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can sleep.
 */
static void flush_warnings(struct dquot *const *dquots, char *warntype)
{
	struct dquot *dq;
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		dq = dquots[i];
		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
		    !warning_issued(dq, warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
			print_warning(dq, warntype[i]);
#endif
			quota_send_warning(dq->dq_type, dq->dq_id,
					   dq->dq_sb->s_dev, warntype[i]);
		}
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & V1_DQF_RSQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return 0;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BHARDWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed the soft
			 * limit, so exceeding it will always be reported
			 */
			return -EDQUOT;
	}

	return 0;
}

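/*
 * info_idq_free() and info_bdq_free() below decide which "dropped below
 * limit" warning, if any, to send when inodes or space are freed.
 */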
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

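/* Is any quota type loaded and not suspended for the inode's superblock? */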
static int dquot_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * We do things in a somewhat complicated way but by that we avoid calling
 * dqget() and thus filesystem callbacks under dqptr_sem.
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static void __dquot_initialize(struct inode *inode, int type)
{
	unsigned int id = 0;
	int cnt;
	struct dquot *got[MAXQUOTAS];
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		got[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		switch (cnt) {
		case USRQUOTA:
			id = inode->i_uid;
			break;
		case GRPQUOTA:
			id = inode->i_gid;
			break;
		}
		got[cnt] = dqget(sb, id, cnt);
	}

	down_write(&sb_dqopt(sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))
		goto out_err;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!inode->i_dquot[cnt]) {
			inode->i_dquot[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv))
				dquot_resv_space(inode->i_dquot[cnt], rsv);
		}
	}
out_err:
	up_write(&sb_dqopt(sb)->dqptr_sem);
	/* Drop unused references */
	dqput_all(got);
}

void dquot_initialize(struct inode *inode)
{
	__dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);

/*
 * Release all quotas referenced by inode
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot *put[MAXQUOTAS];

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = inode->i_dquot[cnt];
		inode->i_dquot[cnt] = NULL;
	}
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually a reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);

void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);

void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

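/* Add space to the inode either as reserved space or as allocated bytes */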
static void inode_incr_space(struct inode *inode, qsize_t number,
				int reserve)
{
	if (reserve)
		inode_add_rsv_space(inode, number);
	else
		inode_add_bytes(inode, number);
}

static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
	if (reserve)
		inode_sub_rsv_space(inode, number);
	else
		inode_sub_bytes(inode, number);
}

/*
 * This function updates the i_blocks+i_bytes fields and quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0;
	char warntype[MAXQUOTAS];
	int warn = flags & DQUOT_SPACE_WARN;
	int reserve = flags & DQUOT_SPACE_RESERVE;
	int nofail = flags & DQUOT_SPACE_NOFAIL;

	/*
	 * First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex
	 */
	if (!dquot_active(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		ret = check_bdq(inode->i_dquot[cnt], number, !warn,
				warntype+cnt);
		if (ret && !nofail) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (reserve)
			dquot_resv_space(inode->i_dquot[cnt], number);
		else
			dquot_incr_space(inode->i_dquot[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode)
{
	int cnt, ret = 0;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
		if (ret)
			goto warn_put_all;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		dquot_incr_inodes(inode->i_dquot[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	int cnt;

	if (!dquot_active(inode)) {
		inode_claim_rsv_space(inode, number);
		return 0;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* Convert reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_claim_reserved_space(inode->i_dquot[cnt],
						   number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode)) {
		inode_decr_space(inode, number, reserve);
		return;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
		if (reserve)
			dquot_free_reserved_space(inode->i_dquot[cnt], number);
		else
			dquot_decr_space(inode->i_dquot[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(inode->i_dquot);
out_unlock:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(__dquot_free_space);

/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(const struct inode *inode)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
		dquot_decr_inodes(inode->i_dquot[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(dquot_free_inode);

/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated.
 * A transaction must be started when entering this function.
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	char warntype_to[MAXQUOTAS];
	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return 0;
	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype_to[cnt] = QUOTA_NL_NOWARN;
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return 0;
	}
	spin_lock(&dq_data_lock);
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = inode->i_dquot[cnt];
		ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
		if (ret)
			goto over_quota;
		ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
		if (ret)
			goto over_quota;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			warntype_from_inodes[cnt] =
				info_idq_free(transfer_from[cnt], 1);
			warntype_from_space[cnt] =
				info_bdq_free(transfer_from[cnt], space);
			dquot_decr_inodes(transfer_from[cnt], 1);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}

		dquot_incr_inodes(transfer_to[cnt], 1);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		inode->i_dquot[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(transfer_to, warntype_to);
	flush_warnings(transfer_from, warntype_from_inodes);
	flush_warnings(transfer_from, warntype_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(transfer_to, warntype_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
1832
1833 /* Wrapper for transferring ownership of an inode for uid/gid only
1834 * Called from FSXXX_setattr()
1835 */
1836 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1837 {
1838 struct dquot *transfer_to[MAXQUOTAS] = {};
1839 struct super_block *sb = inode->i_sb;
1840 int ret;
1841
1842 if (!dquot_active(inode))
1843 return 0;
1844
1845 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
1846 transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
1847 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
1848 transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
1849
1850 ret = __dquot_transfer(inode, transfer_to);
1851 dqput_all(transfer_to);
1852 return ret;
1853 }
1854 EXPORT_SYMBOL(dquot_transfer);
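/*
 * Example (editorial sketch, not part of the original file): a typical
 * ->setattr() implementation calls dquot_transfer() before copying the new
 * owner into the inode. The "myfs" names are hypothetical; the helpers used
 * (inode_change_ok(), is_quota_modification(), setattr_copy()) are the
 * generic VFS ones of this kernel era.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *iattr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, iattr);
 *
 *		if (error)
 *			return error;
 *		if (is_quota_modification(inode, iattr))
 *			dquot_initialize(inode);
 *		error = dquot_transfer(inode, iattr);
 *		if (error)
 *			return error;
 *		setattr_copy(inode, iattr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */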
1855
1856 /*
1857 * Write info of quota file to disk
1858 */
1859 int dquot_commit_info(struct super_block *sb, int type)
1860 {
1861 int ret;
1862 struct quota_info *dqopt = sb_dqopt(sb);
1863
1864 mutex_lock(&dqopt->dqio_mutex);
1865 ret = dqopt->ops[type]->write_file_info(sb, type);
1866 mutex_unlock(&dqopt->dqio_mutex);
1867 return ret;
1868 }
1869 EXPORT_SYMBOL(dquot_commit_info);
1870
1871 /*
1872 * Definitions of diskquota operations.
1873 */
1874 const struct dquot_operations dquot_operations = {
1875 .write_dquot = dquot_commit,
1876 .acquire_dquot = dquot_acquire,
1877 .release_dquot = dquot_release,
1878 .mark_dirty = dquot_mark_dquot_dirty,
1879 .write_info = dquot_commit_info,
1880 .alloc_dquot = dquot_alloc,
1881 .destroy_dquot = dquot_destroy,
1882 };
1883 EXPORT_SYMBOL(dquot_operations);
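/*
 * Example (editorial sketch): a filesystem opts into these generic
 * operations from its fill_super(); dquot_quotactl_ops is defined further
 * down in this file.
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_ops;
 */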
1884
1885 /*
1886 * Generic helper for ->open on filesystems supporting disk quotas.
1887 */
1888 int dquot_file_open(struct inode *inode, struct file *file)
1889 {
1890 int error;
1891
1892 error = generic_file_open(inode, file);
1893 if (!error && (file->f_mode & FMODE_WRITE))
1894 dquot_initialize(inode);
1895 return error;
1896 }
1897 EXPORT_SYMBOL(dquot_file_open);
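/*
 * Example (editorial sketch): filesystems typically plug this helper
 * straight into their file_operations so that writable opens get the
 * inode's dquots initialized ("myfs" names are hypothetical):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.open		= dquot_file_open,
 *		.release	= myfs_release_file,
 *	};
 */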
1898
1899 /*
1900 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1901 */
1902 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
1903 {
1904 int cnt, ret = 0;
1905 struct quota_info *dqopt = sb_dqopt(sb);
1906 struct inode *toputinode[MAXQUOTAS];
1907
1908 /* We cannot turn off usage accounting without also turning off limits,
1909 * nor suspend quotas while simultaneously turning them off. */
1910 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
1911 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
1912 DQUOT_USAGE_ENABLED)))
1913 return -EINVAL;
1914
1915 /* We need to serialize quota_off() for device */
1916 mutex_lock(&dqopt->dqonoff_mutex);
1917
1918 /*
1919 * Skip everything if there's nothing to do. We have to do this because
1920 * sometimes we are called when fill_super() failed and calling
1921 * sync_fs() in such cases does no good.
1922 */
1923 if (!sb_any_quota_loaded(sb)) {
1924 mutex_unlock(&dqopt->dqonoff_mutex);
1925 return 0;
1926 }
1927 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1928 toputinode[cnt] = NULL;
1929 if (type != -1 && cnt != type)
1930 continue;
1931 if (!sb_has_quota_loaded(sb, cnt))
1932 continue;
1933
1934 if (flags & DQUOT_SUSPENDED) {
1935 spin_lock(&dq_state_lock);
1936 dqopt->flags |=
1937 dquot_state_flag(DQUOT_SUSPENDED, cnt);
1938 spin_unlock(&dq_state_lock);
1939 } else {
1940 spin_lock(&dq_state_lock);
1941 dqopt->flags &= ~dquot_state_flag(flags, cnt);
1942 /* Turning off suspended quotas? */
1943 if (!sb_has_quota_loaded(sb, cnt) &&
1944 sb_has_quota_suspended(sb, cnt)) {
1945 dqopt->flags &= ~dquot_state_flag(
1946 DQUOT_SUSPENDED, cnt);
1947 spin_unlock(&dq_state_lock);
1948 iput(dqopt->files[cnt]);
1949 dqopt->files[cnt] = NULL;
1950 continue;
1951 }
1952 spin_unlock(&dq_state_lock);
1953 }
1954
1955 /* Do we still have to keep quota loaded? */
1956 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
1957 continue;
1958
1959 /* Note: these are blocking operations */
1960 drop_dquot_ref(sb, cnt);
1961 invalidate_dquots(sb, cnt);
1962 /*
1963 * Now all dquots should be invalidated and all writes done, so we
1964 * should be the only users of the info. No locks are needed.
1965 */
1966 if (info_dirty(&dqopt->info[cnt]))
1967 sb->dq_op->write_info(sb, cnt);
1968 if (dqopt->ops[cnt]->free_file_info)
1969 dqopt->ops[cnt]->free_file_info(sb, cnt);
1970 put_quota_format(dqopt->info[cnt].dqi_format);
1971
1972 toputinode[cnt] = dqopt->files[cnt];
1973 if (!sb_has_quota_loaded(sb, cnt))
1974 dqopt->files[cnt] = NULL;
1975 dqopt->info[cnt].dqi_flags = 0;
1976 dqopt->info[cnt].dqi_igrace = 0;
1977 dqopt->info[cnt].dqi_bgrace = 0;
1978 dqopt->ops[cnt] = NULL;
1979 }
1980 mutex_unlock(&dqopt->dqonoff_mutex);
1981
1982 /* Skip syncing and setting flags if quota files are hidden */
1983 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
1984 goto put_inodes;
1985
1986 /* Sync the superblock so that buffers with quota data are written to
1987 * disk (and so userspace sees correct data afterwards). */
1988 if (sb->s_op->sync_fs)
1989 sb->s_op->sync_fs(sb, 1);
1990 sync_blockdev(sb->s_bdev);
1991 /* Now the quota files are just ordinary files and we can set the
1992 * inode flags back. Moreover we discard the pagecache so that
1993 * userspace sees the writes we did bypassing the pagecache. We
1994 * must also discard the blockdev buffers so that we see the
1995 * changes done by userspace on the next quotaon(). */
1996 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1997 if (toputinode[cnt]) {
1998 mutex_lock(&dqopt->dqonoff_mutex);
1999 /* If quota was reenabled in the meantime, we have
2000 * nothing to do */
2001 if (!sb_has_quota_loaded(sb, cnt)) {
2002 mutex_lock_nested(&toputinode[cnt]->i_mutex,
2003 I_MUTEX_QUOTA);
2004 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2005 S_NOATIME | S_NOQUOTA);
2006 truncate_inode_pages(&toputinode[cnt]->i_data,
2007 0);
2008 mutex_unlock(&toputinode[cnt]->i_mutex);
2009 mark_inode_dirty_sync(toputinode[cnt]);
2010 }
2011 mutex_unlock(&dqopt->dqonoff_mutex);
2012 }
2013 if (sb->s_bdev)
2014 invalidate_bdev(sb->s_bdev);
2015 put_inodes:
2016 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2017 if (toputinode[cnt]) {
2018 /* On remount RO, we keep the inode pointer so that we
2019 * can reenable quota on a subsequent remount RW. We
2020 * have to check the 'flags' variable and not use the sb_has_*
2021 * functions because another quotaon / quotaoff could
2022 * change the global state before we got here. We refuse
2023 * to suspend quotas when there is a pending delete on
2024 * the quota file... */
2025 if (!(flags & DQUOT_SUSPENDED))
2026 iput(toputinode[cnt]);
2027 else if (!toputinode[cnt]->i_nlink)
2028 ret = -EBUSY;
2029 }
2030 return ret;
2031 }
2032 EXPORT_SYMBOL(dquot_disable);
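/*
 * Example (editorial sketch): the flag combinations the -EINVAL check above
 * accepts. Suspending must not be mixed with turning usage or limits off:
 *
 *	// suspend all quota types (e.g. on remount read-only)
 *	dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *
 *	// full quotaoff for one type, as dquot_quota_off() below does
 *	dquot_disable(sb, USRQUOTA,
 *		      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 */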
2033
2034 int dquot_quota_off(struct super_block *sb, int type)
2035 {
2036 return dquot_disable(sb, type,
2037 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2038 }
2039 EXPORT_SYMBOL(dquot_quota_off);
2040
2041 /*
2042 * Turn quotas on for a device
2043 */
2044
2045 /*
2046 * Helper function to turn quotas on when we already have the inode of the
2047 * quota file and no quota information is loaded.
2048 */
2049 static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2050 unsigned int flags)
2051 {
2052 struct quota_format_type *fmt = find_quota_format(format_id);
2053 struct super_block *sb = inode->i_sb;
2054 struct quota_info *dqopt = sb_dqopt(sb);
2055 int error;
2056 int oldflags = -1;
2057
2058 if (!fmt)
2059 return -ESRCH;
2060 if (!S_ISREG(inode->i_mode)) {
2061 error = -EACCES;
2062 goto out_fmt;
2063 }
2064 if (IS_RDONLY(inode)) {
2065 error = -EROFS;
2066 goto out_fmt;
2067 }
2068 if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
2069 error = -EINVAL;
2070 goto out_fmt;
2071 }
2072 /* Usage always has to be set... */
2073 if (!(flags & DQUOT_USAGE_ENABLED)) {
2074 error = -EINVAL;
2075 goto out_fmt;
2076 }
2077
2078 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2079 /* As we bypass the pagecache we must now flush all the
2080 * dirty data and invalidate caches so that the kernel sees
2081 * changes from userspace. It is not enough to just flush
2082 * the quota file since, if blocksize < pagesize, invalidation
2083 * of the cache could fail because of other unrelated dirty
2084 * data. */
2085 sync_filesystem(sb);
2086 invalidate_bdev(sb->s_bdev);
2087 }
2088 mutex_lock(&dqopt->dqonoff_mutex);
2089 if (sb_has_quota_loaded(sb, type)) {
2090 error = -EBUSY;
2091 goto out_lock;
2092 }
2093
2094 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2095 /* We don't want quota and atime on quota files (deadlocks are
2096 * possible). Also nobody should write to the file - we use
2097 * special IO operations which ignore the immutable bit. */
2098 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2099 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2100 S_NOQUOTA);
2101 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2102 mutex_unlock(&inode->i_mutex);
2103 /*
2104 * When S_NOQUOTA is set, remove dquot references as no more
2105 * references can be added
2106 */
2107 __dquot_drop(inode);
2108 }
2109
2110 error = -EIO;
2111 dqopt->files[type] = igrab(inode);
2112 if (!dqopt->files[type])
2113 goto out_lock;
2114 error = -EINVAL;
2115 if (!fmt->qf_ops->check_quota_file(sb, type))
2116 goto out_file_init;
2117
2118 dqopt->ops[type] = fmt->qf_ops;
2119 dqopt->info[type].dqi_format = fmt;
2120 dqopt->info[type].dqi_fmt_id = format_id;
2121 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2122 mutex_lock(&dqopt->dqio_mutex);
2123 error = dqopt->ops[type]->read_file_info(sb, type);
2124 if (error < 0) {
2125 mutex_unlock(&dqopt->dqio_mutex);
2126 goto out_file_init;
2127 }
2128 mutex_unlock(&dqopt->dqio_mutex);
2129 spin_lock(&dq_state_lock);
2130 dqopt->flags |= dquot_state_flag(flags, type);
2131 spin_unlock(&dq_state_lock);
2132
2133 add_dquot_ref(sb, type);
2134 mutex_unlock(&dqopt->dqonoff_mutex);
2135
2136 return 0;
2137
2138 out_file_init:
2139 dqopt->files[type] = NULL;
2140 iput(inode);
2141 out_lock:
2142 if (oldflags != -1) {
2143 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2144 /* Set the flags back (in the case of an accidental quotaon()
2145 * on the wrong file we don't want to mess up the flags). */
2146 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2147 inode->i_flags |= oldflags;
2148 mutex_unlock(&inode->i_mutex);
2149 }
2150 mutex_unlock(&dqopt->dqonoff_mutex);
2151 out_fmt:
2152 put_quota_format(fmt);
2153
2154 return error;
2155 }
2156
2157 /* Reenable quotas on remount RW */
2158 int dquot_resume(struct super_block *sb, int type)
2159 {
2160 struct quota_info *dqopt = sb_dqopt(sb);
2161 struct inode *inode;
2162 int ret = 0, cnt;
2163 unsigned int flags;
2164
2165 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2166 if (type != -1 && cnt != type)
2167 continue;
2168
2169 mutex_lock(&dqopt->dqonoff_mutex);
2170 if (!sb_has_quota_suspended(sb, cnt)) {
2171 mutex_unlock(&dqopt->dqonoff_mutex);
2172 continue;
2173 }
2174 inode = dqopt->files[cnt];
2175 dqopt->files[cnt] = NULL;
2176 spin_lock(&dq_state_lock);
2177 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2178 DQUOT_LIMITS_ENABLED,
2179 cnt);
2180 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2181 spin_unlock(&dq_state_lock);
2182 mutex_unlock(&dqopt->dqonoff_mutex);
2183
2184 flags = dquot_generic_flag(flags, cnt);
2185 ret = vfs_load_quota_inode(inode, cnt,
2186 dqopt->info[cnt].dqi_fmt_id, flags);
2187 iput(inode);
2188 }
2189
2190 return ret;
2191 }
2192 EXPORT_SYMBOL(dquot_resume);
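/*
 * Example (editorial sketch): a remount handler pairing dquot_resume() with
 * dquot_suspend(), the inline wrapper in quotaops.h that calls
 * dquot_disable(sb, type, DQUOT_SUSPENDED). "myfs" is hypothetical.
 *
 *	static int myfs_remount(struct super_block *sb, int *flags, char *data)
 *	{
 *		if (*flags & MS_RDONLY)
 *			dquot_suspend(sb, -1);
 *		else
 *			dquot_resume(sb, -1);
 *		return 0;
 *	}
 */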
2193
2194 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2195 struct path *path)
2196 {
2197 int error = security_quota_on(path->dentry);
2198 if (error)
2199 return error;
2200 /* Quota file not on the same filesystem? */
2201 if (path->mnt->mnt_sb != sb)
2202 error = -EXDEV;
2203 else
2204 error = vfs_load_quota_inode(path->dentry->d_inode, type,
2205 format_id, DQUOT_USAGE_ENABLED |
2206 DQUOT_LIMITS_ENABLED);
2207 return error;
2208 }
2209 EXPORT_SYMBOL(dquot_quota_on);
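/*
 * Example (editorial sketch): this is the handler userspace reaches through
 * quotactl(2); the addr argument names the quota file, which must live on
 * the same filesystem (device and path below are hypothetical):
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V0,
 *		 (caddr_t)"/mnt/aquota.user");
 */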
2210
2211 /*
2212 * More powerful function for turning on quotas, allowing the setting
2213 * of individual quota flags.
2214 */
2215 int dquot_enable(struct inode *inode, int type, int format_id,
2216 unsigned int flags)
2217 {
2218 int ret = 0;
2219 struct super_block *sb = inode->i_sb;
2220 struct quota_info *dqopt = sb_dqopt(sb);
2221
2222 /* Unsuspending quotas is not done here but in dquot_resume() */
2223 BUG_ON(flags & DQUOT_SUSPENDED);
2224
2225 if (!flags)
2226 return 0;
2227 /* Just updating flags needed? */
2228 if (sb_has_quota_loaded(sb, type)) {
2229 mutex_lock(&dqopt->dqonoff_mutex);
2230 /* Now do a reliable test... */
2231 if (!sb_has_quota_loaded(sb, type)) {
2232 mutex_unlock(&dqopt->dqonoff_mutex);
2233 goto load_quota;
2234 }
2235 if (flags & DQUOT_USAGE_ENABLED &&
2236 sb_has_quota_usage_enabled(sb, type)) {
2237 ret = -EBUSY;
2238 goto out_lock;
2239 }
2240 if (flags & DQUOT_LIMITS_ENABLED &&
2241 sb_has_quota_limits_enabled(sb, type)) {
2242 ret = -EBUSY;
2243 goto out_lock;
2244 }
2245 spin_lock(&dq_state_lock);
2246 sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
2247 spin_unlock(&dq_state_lock);
2248 out_lock:
2249 mutex_unlock(&dqopt->dqonoff_mutex);
2250 return ret;
2251 }
2252
2253 load_quota:
2254 return vfs_load_quota_inode(inode, type, format_id, flags);
2255 }
2256 EXPORT_SYMBOL(dquot_enable);
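/*
 * Example (editorial sketch): a filesystem that always accounts usage can
 * later switch limit enforcement on for an already-loaded type; since the
 * quota is loaded, this takes the flag-update branch above:
 *
 *	ret = dquot_enable(dqopt->files[USRQUOTA], USRQUOTA,
 *			   dqopt->info[USRQUOTA].dqi_fmt_id,
 *			   DQUOT_LIMITS_ENABLED);
 */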
2257
2258 /*
2259 * This function is used when a filesystem needs to initialize quotas
2260 * at mount time.
2261 */
2262 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2263 int format_id, int type)
2264 {
2265 struct dentry *dentry;
2266 int error;
2267
2268 mutex_lock(&sb->s_root->d_inode->i_mutex);
2269 dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
2270 mutex_unlock(&sb->s_root->d_inode->i_mutex);
2271 if (IS_ERR(dentry))
2272 return PTR_ERR(dentry);
2273
2274 if (!dentry->d_inode) {
2275 error = -ENOENT;
2276 goto out;
2277 }
2278
2279 error = security_quota_on(dentry);
2280 if (!error)
2281 error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
2282 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2283
2284 out:
2285 dput(dentry);
2286 return error;
2287 }
2288 EXPORT_SYMBOL(dquot_quota_on_mount);
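/*
 * Example (editorial sketch): enabling quota from a mount option, as
 * journalled-quota filesystems do (the file name is hypothetical):
 *
 *	err = dquot_quota_on_mount(sb, "aquota.user", QFMT_VFS_V0, USRQUOTA);
 */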
2289
2290 static inline qsize_t qbtos(qsize_t blocks)
2291 {
2292 return blocks << QIF_DQBLKSIZE_BITS;
2293 }
2294
2295 static inline qsize_t stoqb(qsize_t space)
2296 {
2297 return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
2298 }
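/*
 * Editorial note: with QIF_DQBLKSIZE_BITS == 10 the quota block is 1 KiB,
 * so qbtos(3) == 3072 bytes, while stoqb() rounds up: stoqb(1024) == 1 and
 * stoqb(1025) == 2.
 */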
2299
2300 /* Generic routine for getting common part of quota structure */
2301 static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2302 {
2303 struct mem_dqblk *dm = &dquot->dq_dqb;
2304
2305 memset(di, 0, sizeof(*di));
2306 di->d_version = FS_DQUOT_VERSION;
2307 di->d_flags = dquot->dq_type == USRQUOTA ?
2308 FS_USER_QUOTA : FS_GROUP_QUOTA;
2309 di->d_id = dquot->dq_id;
2310
2311 spin_lock(&dq_data_lock);
2312 di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
2313 di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
2314 di->d_ino_hardlimit = dm->dqb_ihardlimit;
2315 di->d_ino_softlimit = dm->dqb_isoftlimit;
2316 di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
2317 di->d_icount = dm->dqb_curinodes;
2318 di->d_btimer = dm->dqb_btime;
2319 di->d_itimer = dm->dqb_itime;
2320 spin_unlock(&dq_data_lock);
2321 }
2322
2323 int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
2324 struct fs_disk_quota *di)
2325 {
2326 struct dquot *dquot;
2327
2328 dquot = dqget(sb, id, type);
2329 if (!dquot)
2330 return -ESRCH;
2331 do_get_dqblk(dquot, di);
2332 dqput(dquot);
2333
2334 return 0;
2335 }
2336 EXPORT_SYMBOL(dquot_get_dqblk);
2337
2338 #define VFS_FS_DQ_MASK \
2339 (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
2340 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
2341 FS_DQ_BTIMER | FS_DQ_ITIMER)
2342
2343 /* Generic routine for setting common part of quota structure */
2344 static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2345 {
2346 struct mem_dqblk *dm = &dquot->dq_dqb;
2347 int check_blim = 0, check_ilim = 0;
2348 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
2349
2350 if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
2351 return -EINVAL;
2352
2353 if (((di->d_fieldmask & FS_DQ_BSOFT) &&
2354 (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
2355 ((di->d_fieldmask & FS_DQ_BHARD) &&
2356 (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
2357 ((di->d_fieldmask & FS_DQ_ISOFT) &&
2358 (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
2359 ((di->d_fieldmask & FS_DQ_IHARD) &&
2360 (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
2361 return -ERANGE;
2362
2363 spin_lock(&dq_data_lock);
2364 if (di->d_fieldmask & FS_DQ_BCOUNT) {
2365 dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
2366 check_blim = 1;
2367 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2368 }
2369
2370 if (di->d_fieldmask & FS_DQ_BSOFT)
2371 dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
2372 if (di->d_fieldmask & FS_DQ_BHARD)
2373 dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
2374 if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
2375 check_blim = 1;
2376 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2377 }
2378
2379 if (di->d_fieldmask & FS_DQ_ICOUNT) {
2380 dm->dqb_curinodes = di->d_icount;
2381 check_ilim = 1;
2382 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2383 }
2384
2385 if (di->d_fieldmask & FS_DQ_ISOFT)
2386 dm->dqb_isoftlimit = di->d_ino_softlimit;
2387 if (di->d_fieldmask & FS_DQ_IHARD)
2388 dm->dqb_ihardlimit = di->d_ino_hardlimit;
2389 if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
2390 check_ilim = 1;
2391 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2392 }
2393
2394 if (di->d_fieldmask & FS_DQ_BTIMER) {
2395 dm->dqb_btime = di->d_btimer;
2396 check_blim = 1;
2397 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2398 }
2399
2400 if (di->d_fieldmask & FS_DQ_ITIMER) {
2401 dm->dqb_itime = di->d_itimer;
2402 check_ilim = 1;
2403 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2404 }
2405
2406 if (check_blim) {
2407 if (!dm->dqb_bsoftlimit ||
2408 dm->dqb_curspace < dm->dqb_bsoftlimit) {
2409 dm->dqb_btime = 0;
2410 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2411 } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
2412 /* Set the grace time only if the user hasn't provided their own... */
2413 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2414 }
2415 if (check_ilim) {
2416 if (!dm->dqb_isoftlimit ||
2417 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2418 dm->dqb_itime = 0;
2419 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2420 } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
2421 /* Set the grace time only if the user hasn't provided their own... */
2422 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2423 }
2424 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2425 dm->dqb_isoftlimit)
2426 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2427 else
2428 set_bit(DQ_FAKE_B, &dquot->dq_flags);
2429 spin_unlock(&dq_data_lock);
2430 mark_dquot_dirty(dquot);
2431
2432 return 0;
2433 }
2434
2435 int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
2436 struct fs_disk_quota *di)
2437 {
2438 struct dquot *dquot;
2439 int rc;
2440
2441 dquot = dqget(sb, id, type);
2442 if (!dquot) {
2443 rc = -ESRCH;
2444 goto out;
2445 }
2446 rc = do_set_dqblk(dquot, di);
2447 dqput(dquot);
2448 out:
2449 return rc;
2450 }
2451 EXPORT_SYMBOL(dquot_set_dqblk);
2452
2453 /* Generic routine for getting common part of quota file information */
2454 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2455 {
2456 struct mem_dqinfo *mi;
2457
2458 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2459 if (!sb_has_quota_active(sb, type)) {
2460 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2461 return -ESRCH;
2462 }
2463 mi = sb_dqopt(sb)->info + type;
2464 spin_lock(&dq_data_lock);
2465 ii->dqi_bgrace = mi->dqi_bgrace;
2466 ii->dqi_igrace = mi->dqi_igrace;
2467 ii->dqi_flags = mi->dqi_flags & DQF_MASK;
2468 ii->dqi_valid = IIF_ALL;
2469 spin_unlock(&dq_data_lock);
2470 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2471 return 0;
2472 }
2473 EXPORT_SYMBOL(dquot_get_dqinfo);
2474
2475 /* Generic routine for setting common part of quota file information */
2476 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2477 {
2478 struct mem_dqinfo *mi;
2479 int err = 0;
2480
2481 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2482 if (!sb_has_quota_active(sb, type)) {
2483 err = -ESRCH;
2484 goto out;
2485 }
2486 mi = sb_dqopt(sb)->info + type;
2487 spin_lock(&dq_data_lock);
2488 if (ii->dqi_valid & IIF_BGRACE)
2489 mi->dqi_bgrace = ii->dqi_bgrace;
2490 if (ii->dqi_valid & IIF_IGRACE)
2491 mi->dqi_igrace = ii->dqi_igrace;
2492 if (ii->dqi_valid & IIF_FLAGS)
2493 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
2494 (ii->dqi_flags & DQF_MASK);
2495 spin_unlock(&dq_data_lock);
2496 mark_info_dirty(sb, type);
2497 /* Force write to disk */
2498 sb->dq_op->write_info(sb, type);
2499 out:
2500 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2501 return err;
2502 }
2503 EXPORT_SYMBOL(dquot_set_dqinfo);
2504
2505 const struct quotactl_ops dquot_quotactl_ops = {
2506 .quota_on = dquot_quota_on,
2507 .quota_off = dquot_quota_off,
2508 .quota_sync = dquot_quota_sync,
2509 .get_info = dquot_get_dqinfo,
2510 .set_info = dquot_set_dqinfo,
2511 .get_dqblk = dquot_get_dqblk,
2512 .set_dqblk = dquot_set_dqblk
2513 };
2514 EXPORT_SYMBOL(dquot_quotactl_ops);
2515
2516 static int do_proc_dqstats(struct ctl_table *table, int write,
2517 void __user *buffer, size_t *lenp, loff_t *ppos)
2518 {
2519 unsigned int type = (int *)table->data - dqstats.stat;
2520
2521 /* Update global table */
2522 dqstats.stat[type] =
2523 percpu_counter_sum_positive(&dqstats.counter[type]);
2524 return proc_dointvec(table, write, buffer, lenp, ppos);
2525 }
2526
2527 static ctl_table fs_dqstats_table[] = {
2528 {
2529 .procname = "lookups",
2530 .data = &dqstats.stat[DQST_LOOKUPS],
2531 .maxlen = sizeof(int),
2532 .mode = 0444,
2533 .proc_handler = do_proc_dqstats,
2534 },
2535 {
2536 .procname = "drops",
2537 .data = &dqstats.stat[DQST_DROPS],
2538 .maxlen = sizeof(int),
2539 .mode = 0444,
2540 .proc_handler = do_proc_dqstats,
2541 },
2542 {
2543 .procname = "reads",
2544 .data = &dqstats.stat[DQST_READS],
2545 .maxlen = sizeof(int),
2546 .mode = 0444,
2547 .proc_handler = do_proc_dqstats,
2548 },
2549 {
2550 .procname = "writes",
2551 .data = &dqstats.stat[DQST_WRITES],
2552 .maxlen = sizeof(int),
2553 .mode = 0444,
2554 .proc_handler = do_proc_dqstats,
2555 },
2556 {
2557 .procname = "cache_hits",
2558 .data = &dqstats.stat[DQST_CACHE_HITS],
2559 .maxlen = sizeof(int),
2560 .mode = 0444,
2561 .proc_handler = do_proc_dqstats,
2562 },
2563 {
2564 .procname = "allocated_dquots",
2565 .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
2566 .maxlen = sizeof(int),
2567 .mode = 0444,
2568 .proc_handler = do_proc_dqstats,
2569 },
2570 {
2571 .procname = "free_dquots",
2572 .data = &dqstats.stat[DQST_FREE_DQUOTS],
2573 .maxlen = sizeof(int),
2574 .mode = 0444,
2575 .proc_handler = do_proc_dqstats,
2576 },
2577 {
2578 .procname = "syncs",
2579 .data = &dqstats.stat[DQST_SYNCS],
2580 .maxlen = sizeof(int),
2581 .mode = 0444,
2582 .proc_handler = do_proc_dqstats,
2583 },
2584 #ifdef CONFIG_PRINT_QUOTA_WARNING
2585 {
2586 .procname = "warnings",
2587 .data = &flag_print_warnings,
2588 .maxlen = sizeof(int),
2589 .mode = 0644,
2590 .proc_handler = proc_dointvec,
2591 },
2592 #endif
2593 { },
2594 };
2595
2596 static ctl_table fs_table[] = {
2597 {
2598 .procname = "quota",
2599 .mode = 0555,
2600 .child = fs_dqstats_table,
2601 },
2602 { },
2603 };
2604
2605 static ctl_table sys_table[] = {
2606 {
2607 .procname = "fs",
2608 .mode = 0555,
2609 .child = fs_table,
2610 },
2611 { },
2612 };
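/*
 * Editorial note: the three nested tables above publish the counters as
 * /proc/sys/fs/quota/lookups, drops, reads, writes, cache_hits,
 * allocated_dquots, free_dquots and syncs (plus "warnings" when
 * CONFIG_PRINT_QUOTA_WARNING is set), e.g.:
 *
 *	$ cat /proc/sys/fs/quota/free_dquots
 */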
2613
2614 static int __init dquot_init(void)
2615 {
2616 int i, ret;
2617 unsigned long nr_hash, order;
2618
2619 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2620
2621 register_sysctl_table(sys_table);
2622
2623 dquot_cachep = kmem_cache_create("dquot",
2624 sizeof(struct dquot), sizeof(unsigned long) * 4,
2625 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2626 SLAB_MEM_SPREAD|SLAB_PANIC),
2627 NULL);
2628
2629 order = 0;
2630 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2631 if (!dquot_hash)
2632 panic("Cannot create dquot hash table");
2633
2634 for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2635 ret = percpu_counter_init(&dqstats.counter[i], 0);
2636 if (ret)
2637 panic("Cannot create dquot stat counters");
2638 }
2639
2640 /* Find power-of-two hlist_heads which can fit into allocation */
2641 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2642 dq_hash_bits = 0;
2643 do {
2644 dq_hash_bits++;
2645 } while (nr_hash >> dq_hash_bits);
2646 dq_hash_bits--;
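	/*
	 * Editorial example: with order 0, a 4096-byte page and an 8-byte
	 * hlist_head, nr_hash starts at 512; the loop above exits with
	 * dq_hash_bits == 10 and the decrement leaves 9, giving a
	 * 512-entry (1UL << 9) table below.
	 */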
2647
2648 nr_hash = 1UL << dq_hash_bits;
2649 dq_hash_mask = nr_hash - 1;
2650 for (i = 0; i < nr_hash; i++)
2651 INIT_HLIST_HEAD(dquot_hash + i);
2652
2653 printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2654 nr_hash, order, (PAGE_SIZE << order));
2655
2656 register_shrinker(&dqcache_shrinker);
2657
2658 return 0;
2659 }
2660 module_init(dquot_init);