/* fs/quota/dquot.c */
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include "../internal.h" /* ugh */

#include <linux/uaccess.h>

/*
 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats.
 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * The i_blocks and i_bytes updates themselves are guarded by i_lock acquired
 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
 * modifications of quota state (on quotaon and quotaoff) and readers who care
 * about latest values take it as well.
 *
 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock.
 *
 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
 * the operation is just reading pointers from the inode (or not using them at
 * all) the read lock is enough. If pointers are altered, the function must
 * hold the write lock. Special care needs to be taken about the S_NOQUOTA
 * inode flag (marking that the inode is a quota file). Functions adding
 * pointers from inode to dquots have to check this flag under dqptr_sem and
 * then (if S_NOQUOTA is not set) they have to do all pointer modifications
 * before dropping dqptr_sem. This makes sure they cannot race with quotaon
 * which first sets the S_NOQUOTA flag and then drops all pointers to dquots
 * from an inode.
 *
 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently a dquot is locked only when it is being read to memory (or space
 * for it is being allocated) on the first dqget() and when it is being
 * released on the last dqput(). The allocation and release operations are
 * serialized by the dq_lock and by checking the use count in dquot_release().
 * Write operations on dquots don't hold dq_lock as they copy data under
 * dq_data_lock spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
 *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
 *   dqio_mutex
 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
 * The lock ordering of dqptr_sem imposed by the quota code is only
 * dqonoff_mutex > dqptr_sem. But a filesystem has to take into account that
 * functions such as dquot_alloc_space() acquire dqptr_sem and usually have to
 * be called from inside a transaction to keep filesystem consistency after a
 * crash. Also, filesystems usually want to do some IO on the dquot from
 * ->mark_dirty, which is called with dqptr_sem held.
 */
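
/*
 * Illustrative sketch (editor's addition, not kernel code): the spinlock
 * ordering above means that code updating quota usage together with
 * i_blocks/i_bytes takes dq_data_lock first and lets the byte counters take
 * i_lock nested inside it, as __dquot_alloc_space() below does:
 *
 *	spin_lock(&dq_data_lock);
 *	dquot->dq_dqb.dqb_curspace += number;	(guarded by dq_data_lock)
 *	inode_add_bytes(inode, number);		(takes i_lock internally)
 *	spin_unlock(&dq_data_lock);
 */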

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
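
/*
 * Usage sketch (editor's addition, loosely modeled on fs/quota/quota_v2.c):
 * a quota format driver fills in a quota_format_type and registers it from
 * its module init. The names below are hypothetical.
 *
 *	static struct quota_format_type my_quota_format = {
 *		.qf_fmt_id = QFMT_VFS_V1,
 *		.qf_ops    = &my_format_ops,
 *		.qf_owner  = THIS_MODULE,
 *	};
 *
 *	static int __init init_my_quota_format(void)
 *	{
 *		return register_quota_format(&my_quota_format);
 *	}
 */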

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for the invalidate operation, which must look at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated it is completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
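
/*
 * Lifecycle sketch (editor's addition, informal summary of the code below):
 *
 *	dqget()			dquot joins inuse_list and dquot_hash,
 *				dq_count > 0, taken off free_dquots
 *	last dqput()		dquot appended to free_dquots (still hashed
 *				and on inuse_list, reusable on cache hits)
 *	shrinker/invalidate	dquot unhashed, unlinked and destroyed
 */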

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct hlist_node *node;
	struct dquot *dquot;

	hlist_for_each(node, dquot_hash + hashent) {
		dquot = hlist_entry(node, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;
	}
	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats_dec(DQST_FREE_DQUOTS);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of the inuse list so that we don't have to
	 * restart traversals of this list when we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
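
/*
 * Usage sketch (editor's addition): filesystems wire ->mark_dirty in their
 * dquot_operations. Non-journalling filesystems can use
 * dquot_mark_dquot_dirty directly; journalling ones typically wrap it so the
 * dquot write happens inside a transaction. Names here are hypothetical:
 *
 *	static const struct dquot_operations my_quota_operations = {
 *		.write_dquot	= my_write_dquot,
 *		.acquire_dquot	= dquot_acquire,
 *		.release_dquot	= dquot_release,
 *		.mark_dirty	= dquot_mark_dquot_dirty,
 *	};
 */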

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* A dquot can be inactive only if there was an error during read or
	 * init => we'd better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or a parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			dqgrab(dquot);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
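
/*
 * Usage sketch (editor's addition, hypothetical callback): count the active
 * dquots on a superblock.
 *
 *	static int count_one(struct dquot *dquot, unsigned long priv)
 *	{
 *		(*(unsigned long *)priv)++;
 *		return 0;
 *	}
 *
 *	unsigned long n = 0;
 *	dquot_scan_active(sb, count_one, (unsigned long)&n);
 */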

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Only a bad dquot can be dirty and inactive */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			err = sb->dq_op->write_dquot(dquot);
			if (!ret && err)
				ret = err;	/* propagate the first error */
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (nor fast) but currently I don't know of
	 * any other simple way to get quota data to disk, and we must get
	 * it there for it to be visible to userspace... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock(&dqopt->files[cnt]->i_mutex);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		mutex_unlock(&dqopt->files[cnt]->i_mutex);
	}
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct list_head *head;
	struct dquot *dquot;
	unsigned long freed = 0;

	head = free_dquots.prev;
	while (head != &free_dquots && sc->nr_to_scan) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
		head = free_dquots.prev;
	}
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}

static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	init_waitqueue_head(&dquot->dq_wait_unused);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot = NULL, *empty = NULL;

	if (!sb_has_quota_active(sb, qid.type))
		return NULL;
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
	}
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
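
/*
 * Usage sketch (editor's addition): a typical lookup/put pair.
 * make_kqid_uid() is from <linux/quota.h>; error handling is elided.
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(current_fsuid()));
 *	if (dquot) {
 *		spin_lock(&dq_data_lock);
 *		... inspect dquot->dq_dqb ...
 *		spin_unlock(&dq_data_lock);
 *		dqput(dquot);
 *	}
 */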

static int dqinit_needed(struct inode *inode, int type)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;
	if (type != -1)
		return !inode->i_dquot[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!inode->i_dquot[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by dqonoff_mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		__dquot_initialize(inode, type);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * inode_sb_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);

#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
}

/*
 * Return 0 if dqput() won't block.
 * (note that 1 doesn't necessarily mean blocking)
 */
static inline int dqput_blocks(struct dquot *dquot)
{
	if (atomic_read(&dquot->dq_count) <= 1)
		return 1;
	return 0;
}

/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 * We can't race with anybody because we hold dqptr_sem for writing...
 */
static int remove_inode_dquot_ref(struct inode *inode, int type,
				  struct list_head *tofree_head)
{
	struct dquot *dquot = inode->i_dquot[type];

	inode->i_dquot[type] = NULL;
	if (dquot) {
		if (dqput_blocks(dquot)) {
#ifdef CONFIG_QUOTA_DEBUG
			if (atomic_read(&dquot->dq_count) != 1)
				quota_error(inode->i_sb, "Adding dquot with "
					    "dq_count %d to dispose list",
					    atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			/* As the dquot currently has users, it can't be on
			 * the free list... */
			list_add(&dquot->dq_free, tofree_head);
			spin_unlock(&dq_list_lock);
			return 1;
		} else
			dqput(dquot);	/* We have guaranteed we won't block */
	}
	return 0;
}

/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be got so we are
 * the only ones holding reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}

static void remove_dquot_ref(struct super_block *sb, int type,
			     struct list_head *tofree_head)
{
	struct inode *inode;
	int reserved = 0;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dqptr_sem).
		 */
		if (!IS_NOQUOTA(inode)) {
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
	}
	spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		down_write(&sb_dqopt(sb)->dqptr_sem);
		remove_dquot_ref(sb, type, &tofree_head);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		put_dquot_list(&tofree_head);
	}
}

static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}

static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}

/*
 * Claim reserved quota space
 */
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace < number) {
		WARN_ON_ONCE(1);
		number = dquot->dq_dqb.dqb_rsvspace;
	}
	dquot->dq_dqb.dqb_curspace += number;
	dquot->dq_dqb.dqb_rsvspace -= number;
}

static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
		number = dquot->dq_dqb.dqb_curspace;
	dquot->dq_dqb.dqb_rsvspace += number;
	dquot->dq_dqb.dqb_curspace -= number;
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		    warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		   ((warntype == QUOTA_NL_IHARDWARN ||
		     warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:	/* Never taken... Just make gcc happy */
		return 0;
	}
	return 0;
}

/* Print warning to the user who exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & V1_DQF_RSQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes,
		     struct dquot_warn *warn)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}

	return 0;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
		     struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else
			/*
			 * We don't allow preallocation to exceed the soft
			 * limit, so exceeding it will always be reported
			 */
			return -EDQUOT;
	}

	return 0;
}

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int dquot_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * We do things in a somewhat complicated way, but by doing so we avoid
 * calling dqget() and thus filesystem callbacks under dqptr_sem.
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static void __dquot_initialize(struct inode *inode, int type)
{
	int cnt;
	struct dquot *got[MAXQUOTAS];
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		got[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		}
		got[cnt] = dqget(sb, qid);
	}

	down_write(&sb_dqopt(sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))
		goto out_err;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!inode->i_dquot[cnt]) {
			inode->i_dquot[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				spin_lock(&dq_data_lock);
				dquot_resv_space(inode->i_dquot[cnt], rsv);
				spin_unlock(&dq_data_lock);
			}
		}
	}
out_err:
	up_write(&sb_dqopt(sb)->dqptr_sem);
	/* Drop unused references */
	dqput_all(got);
}

void dquot_initialize(struct inode *inode)
{
	__dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
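
/*
 * Usage sketch (editor's addition): filesystems call dquot_initialize() on
 * the inodes involved before operations that charge quota, e.g. at the top
 * of a ->create handler (loosely modeled on ext2; names hypothetical):
 *
 *	static int my_create(struct inode *dir, struct dentry *dentry,
 *			     umode_t mode, bool excl)
 *	{
 *		dquot_initialize(dir);
 *		... allocate the new inode and initialize its quota
 *		    pointers before dquot_alloc_inode() charges it ...
 *	}
 */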

/*
 * Release all quotas referenced by inode
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot *put[MAXQUOTAS];

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = inode->i_dquot[cnt];
		inode->i_dquot[cnt] = NULL;
	}
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually a reliable test even without the lock - the caller
	 * must ensure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}
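
/*
 * Usage sketch (editor's addition, loosely modeled on ext4): a filesystem
 * that wants delayed-allocation reservations exposes a qsize_t counter in
 * its per-inode info through ->get_reserved_space:
 *
 *	static qsize_t *my_get_reserved_space(struct inode *inode)
 *	{
 *		return &MY_I(inode)->i_reserved_quota;	(hypothetical)
 *	}
 */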

void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);

void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);

void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_reclaim_rsv_space);

void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

static void inode_incr_space(struct inode *inode, qsize_t number,
			     int reserve)
{
	if (reserve)
		inode_add_rsv_space(inode, number);
	else
		inode_add_bytes(inode, number);
}

static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
	if (reserve)
		inode_sub_rsv_space(inode, number);
	else
		inode_sub_bytes(inode, number);
}

/*
 * This function updates the i_blocks+i_bytes fields and the quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots = inode->i_dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE;

	/*
	 * First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex
	 */
	if (!dquot_active(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_bdq(dquots[cnt], number,
				!(flags & DQUOT_SPACE_WARN), &warn[cnt]);
		if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve)
			dquot_resv_space(dquots[cnt], number);
		else
			dquot_incr_space(dquots[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
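
/*
 * Usage sketch (editor's addition): filesystems normally reach this through
 * the inline wrappers in include/linux/quotaops.h rather than directly; a
 * rough summary of those wrappers (from memory, verify against the header):
 *
 *	dquot_alloc_space(inode, nr) calls
 *	    __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN)
 *	    and marks the inode dirty on success;
 *	dquot_alloc_block(inode, nr) is
 *	    dquot_alloc_space(inode, nr << inode->i_blkbits);
 *	dquot_reserve_block(inode, nr) passes
 *	    DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE.
 */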

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode)
{
	int cnt, ret = 0;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots = inode->i_dquot;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_idq(dquots[cnt], 1, &warn[cnt]);
		if (ret)
			goto warn_put_all;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		dquot_incr_inodes(dquots[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	int cnt;

	if (!dquot_active(inode)) {
		inode_claim_rsv_space(inode, number);
		return 0;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_claim_reserved_space(inode->i_dquot[cnt],
						   number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	int cnt;

	if (!dquot_active(inode)) {
		inode_reclaim_rsv_space(inode, number);
		return;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* Return allocated quotas back to reservations */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_reclaim_reserved_space(inode->i_dquot[cnt],
						     number);
	}
	/* Update inode bytes */
	inode_reclaim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots = inode->i_dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode)) {
		inode_decr_space(inode, number, reserve);
		return;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);

/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(const struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots = inode->i_dquot;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
1828
1829 /*
1830 * Transfer the number of inode and blocks from one diskquota to an other.
1831 * On success, dquot references in transfer_to are consumed and references
1832 * to original dquots that need to be released are placed there. On failure,
1833 * references are kept untouched.
1834 *
1835 * This operation can block, but only after everything is updated
1836 * A transaction must be started when entering this function.
1837 *
1838 */
1839 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1840 {
1841 qsize_t space, cur_space;
1842 qsize_t rsv_space = 0;
1843 struct dquot *transfer_from[MAXQUOTAS] = {};
1844 int cnt, ret = 0;
1845 char is_valid[MAXQUOTAS] = {};
1846 struct dquot_warn warn_to[MAXQUOTAS];
1847 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1848 struct dquot_warn warn_from_space[MAXQUOTAS];
1849
1850 /* First test before acquiring mutex - solves deadlocks when we
1851 * re-enter the quota code and are already holding the mutex */
1852 if (IS_NOQUOTA(inode))
1853 return 0;
1854 /* Initialize the arrays */
1855 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1856 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1857 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1858 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1859 }
1860 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1861 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1862 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1863 return 0;
1864 }
1865 spin_lock(&dq_data_lock);
1866 cur_space = inode_get_bytes(inode);
1867 rsv_space = inode_get_rsv_space(inode);
1868 space = cur_space + rsv_space;
1869 /* Build the transfer_from list and check the limits */
1870 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1871 /*
1872 		 * Skip changes for the same uid or gid, or for a quota type that is turned off.
1873 */
1874 if (!transfer_to[cnt])
1875 continue;
1876 /* Avoid races with quotaoff() */
1877 if (!sb_has_quota_active(inode->i_sb, cnt))
1878 continue;
1879 is_valid[cnt] = 1;
1880 transfer_from[cnt] = inode->i_dquot[cnt];
1881 ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
1882 if (ret)
1883 goto over_quota;
1884 ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
1885 if (ret)
1886 goto over_quota;
1887 }
1888
1889 /*
1890 * Finally perform the needed transfer from transfer_from to transfer_to
1891 */
1892 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1893 if (!is_valid[cnt])
1894 continue;
1895 		/* Due to an IO error we might not have a transfer_from[] structure */
1896 if (transfer_from[cnt]) {
1897 int wtype;
1898 wtype = info_idq_free(transfer_from[cnt], 1);
1899 if (wtype != QUOTA_NL_NOWARN)
1900 prepare_warning(&warn_from_inodes[cnt],
1901 transfer_from[cnt], wtype);
1902 wtype = info_bdq_free(transfer_from[cnt], space);
1903 if (wtype != QUOTA_NL_NOWARN)
1904 prepare_warning(&warn_from_space[cnt],
1905 transfer_from[cnt], wtype);
1906 dquot_decr_inodes(transfer_from[cnt], 1);
1907 dquot_decr_space(transfer_from[cnt], cur_space);
1908 dquot_free_reserved_space(transfer_from[cnt],
1909 rsv_space);
1910 }
1911
1912 dquot_incr_inodes(transfer_to[cnt], 1);
1913 dquot_incr_space(transfer_to[cnt], cur_space);
1914 dquot_resv_space(transfer_to[cnt], rsv_space);
1915
1916 inode->i_dquot[cnt] = transfer_to[cnt];
1917 }
1918 spin_unlock(&dq_data_lock);
1919 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1920
1921 mark_all_dquot_dirty(transfer_from);
1922 mark_all_dquot_dirty(transfer_to);
1923 flush_warnings(warn_to);
1924 flush_warnings(warn_from_inodes);
1925 flush_warnings(warn_from_space);
1926 /* Pass back references to put */
1927 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1928 if (is_valid[cnt])
1929 transfer_to[cnt] = transfer_from[cnt];
1930 return 0;
1931 over_quota:
1932 spin_unlock(&dq_data_lock);
1933 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1934 flush_warnings(warn_to);
1935 return ret;
1936 }
1937 EXPORT_SYMBOL(__dquot_transfer);
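
/*
 * Caller sketch (hypothetical; sb, inode and new_uid stand in for the
 * caller's context) showing the reference hand-back described above:
 * on success transfer_to[] comes back holding the old dquot references,
 * so a single dqput_all() releases whichever set is left:
 *
 *	struct dquot *transfer_to[MAXQUOTAS] = {};
 *	int ret;
 *
 *	transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(new_uid));
 *	ret = __dquot_transfer(inode, transfer_to);
 *	dqput_all(transfer_to);
 */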
1938
1939 /* Wrapper for transferring ownership of an inode when only the uid/gid
1940  * change. Called from FSXXX_setattr().
1941 */
1942 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1943 {
1944 struct dquot *transfer_to[MAXQUOTAS] = {};
1945 struct super_block *sb = inode->i_sb;
1946 int ret;
1947
1948 if (!dquot_active(inode))
1949 return 0;
1950
1951 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
1952 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
1953 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
1954 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
1955
1956 ret = __dquot_transfer(inode, transfer_to);
1957 dqput_all(transfer_to);
1958 return ret;
1959 }
1960 EXPORT_SYMBOL(dquot_transfer);
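
/*
 * Sketch of a typical call site (names illustrative), from a
 * filesystem's ->setattr before the new owner is committed:
 *
 *	if (iattr->ia_valid & (ATTR_UID | ATTR_GID)) {
 *		error = dquot_transfer(inode, iattr);
 *		if (error)
 *			return error;
 *	}
 *	setattr_copy(inode, iattr);
 */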
1961
1962 /*
1963 * Write info of quota file to disk
1964 */
1965 int dquot_commit_info(struct super_block *sb, int type)
1966 {
1967 int ret;
1968 struct quota_info *dqopt = sb_dqopt(sb);
1969
1970 mutex_lock(&dqopt->dqio_mutex);
1971 ret = dqopt->ops[type]->write_file_info(sb, type);
1972 mutex_unlock(&dqopt->dqio_mutex);
1973 return ret;
1974 }
1975 EXPORT_SYMBOL(dquot_commit_info);
1976
1977 /*
1978 * Definitions of diskquota operations.
1979 */
1980 const struct dquot_operations dquot_operations = {
1981 .write_dquot = dquot_commit,
1982 .acquire_dquot = dquot_acquire,
1983 .release_dquot = dquot_release,
1984 .mark_dirty = dquot_mark_dquot_dirty,
1985 .write_info = dquot_commit_info,
1986 .alloc_dquot = dquot_alloc,
1987 .destroy_dquot = dquot_destroy,
1988 };
1989 EXPORT_SYMBOL(dquot_operations);
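
/*
 * A filesystem opts in by pointing its super_block at this table when
 * filling in the superblock, e.g.
 *
 *	sb->dq_op = &dquot_operations;
 *
 * Journalling filesystems typically install a copy with ->write_dquot,
 * ->acquire_dquot and ->release_dquot overridden so that dquot updates
 * go through the journal.
 */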
1990
1991 /*
1992 * Generic helper for ->open on filesystems supporting disk quotas.
1993 */
1994 int dquot_file_open(struct inode *inode, struct file *file)
1995 {
1996 int error;
1997
1998 error = generic_file_open(inode, file);
1999 if (!error && (file->f_mode & FMODE_WRITE))
2000 dquot_initialize(inode);
2001 return error;
2002 }
2003 EXPORT_SYMBOL(dquot_file_open);
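
/*
 * Sketch: wired up from a filesystem's file_operations so writable
 * opens get their dquots attached up front ("foo_file_operations" is a
 * placeholder name):
 *
 *	const struct file_operations foo_file_operations = {
 *		.open	= dquot_file_open,
 *		...
 *	};
 */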
2004
2005 /*
2006 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2007 */
2008 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2009 {
2010 int cnt, ret = 0;
2011 struct quota_info *dqopt = sb_dqopt(sb);
2012 struct inode *toputinode[MAXQUOTAS];
2013
2014 	/* We cannot turn off usage accounting without also turning off limits,
2015 	 * nor suspend quotas while simultaneously turning them off. */
2016 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2017 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2018 DQUOT_USAGE_ENABLED)))
2019 return -EINVAL;
2020
2021 /* We need to serialize quota_off() for device */
2022 mutex_lock(&dqopt->dqonoff_mutex);
2023
2024 /*
2025 * Skip everything if there's nothing to do. We have to do this because
2026 * sometimes we are called when fill_super() failed and calling
2027 * sync_fs() in such cases does no good.
2028 */
2029 if (!sb_any_quota_loaded(sb)) {
2030 mutex_unlock(&dqopt->dqonoff_mutex);
2031 return 0;
2032 }
2033 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2034 toputinode[cnt] = NULL;
2035 if (type != -1 && cnt != type)
2036 continue;
2037 if (!sb_has_quota_loaded(sb, cnt))
2038 continue;
2039
2040 if (flags & DQUOT_SUSPENDED) {
2041 spin_lock(&dq_state_lock);
2042 dqopt->flags |=
2043 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2044 spin_unlock(&dq_state_lock);
2045 } else {
2046 spin_lock(&dq_state_lock);
2047 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2048 /* Turning off suspended quotas? */
2049 if (!sb_has_quota_loaded(sb, cnt) &&
2050 sb_has_quota_suspended(sb, cnt)) {
2051 dqopt->flags &= ~dquot_state_flag(
2052 DQUOT_SUSPENDED, cnt);
2053 spin_unlock(&dq_state_lock);
2054 iput(dqopt->files[cnt]);
2055 dqopt->files[cnt] = NULL;
2056 continue;
2057 }
2058 spin_unlock(&dq_state_lock);
2059 }
2060
2061 		/* Do we still have to keep the quota loaded? */
2062 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2063 continue;
2064
2065 /* Note: these are blocking operations */
2066 drop_dquot_ref(sb, cnt);
2067 invalidate_dquots(sb, cnt);
2068 /*
2069 		 * Now all dquots should be invalidated and all writes done, so we
2070 		 * should be the only users of the info. No locks are needed.
2071 */
2072 if (info_dirty(&dqopt->info[cnt]))
2073 sb->dq_op->write_info(sb, cnt);
2074 if (dqopt->ops[cnt]->free_file_info)
2075 dqopt->ops[cnt]->free_file_info(sb, cnt);
2076 put_quota_format(dqopt->info[cnt].dqi_format);
2077
2078 toputinode[cnt] = dqopt->files[cnt];
2079 if (!sb_has_quota_loaded(sb, cnt))
2080 dqopt->files[cnt] = NULL;
2081 dqopt->info[cnt].dqi_flags = 0;
2082 dqopt->info[cnt].dqi_igrace = 0;
2083 dqopt->info[cnt].dqi_bgrace = 0;
2084 dqopt->ops[cnt] = NULL;
2085 }
2086 mutex_unlock(&dqopt->dqonoff_mutex);
2087
2088 /* Skip syncing and setting flags if quota files are hidden */
2089 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2090 goto put_inodes;
2091
2092 /* Sync the superblock so that buffers with quota data are written to
2093 * disk (and so userspace sees correct data afterwards). */
2094 if (sb->s_op->sync_fs)
2095 sb->s_op->sync_fs(sb, 1);
2096 sync_blockdev(sb->s_bdev);
2097 /* Now the quota files are just ordinary files and we can set the
2098 * inode flags back. Moreover we discard the pagecache so that
2099 * userspace sees the writes we did bypassing the pagecache. We
2100 * must also discard the blockdev buffers so that we see the
2101 * changes done by userspace on the next quotaon() */
2102 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2103 if (toputinode[cnt]) {
2104 mutex_lock(&dqopt->dqonoff_mutex);
2105 /* If quota was reenabled in the meantime, we have
2106 * nothing to do */
2107 if (!sb_has_quota_loaded(sb, cnt)) {
2108 mutex_lock(&toputinode[cnt]->i_mutex);
2109 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2110 S_NOATIME | S_NOQUOTA);
2111 truncate_inode_pages(&toputinode[cnt]->i_data,
2112 0);
2113 mutex_unlock(&toputinode[cnt]->i_mutex);
2114 mark_inode_dirty_sync(toputinode[cnt]);
2115 }
2116 mutex_unlock(&dqopt->dqonoff_mutex);
2117 }
2118 if (sb->s_bdev)
2119 invalidate_bdev(sb->s_bdev);
2120 put_inodes:
2121 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2122 if (toputinode[cnt]) {
2123 /* On remount RO, we keep the inode pointer so that we
2124 * can reenable quota on the subsequent remount RW. We
2125 			 * have to check the 'flags' variable and not use the sb_has_*
2126 			 * helpers because another quotaon / quotaoff could change the
2127 			 * global state before we got here. We refuse
2128 * to suspend quotas when there is pending delete on
2129 * the quota file... */
2130 if (!(flags & DQUOT_SUSPENDED))
2131 iput(toputinode[cnt]);
2132 else if (!toputinode[cnt]->i_nlink)
2133 ret = -EBUSY;
2134 }
2135 return ret;
2136 }
2137 EXPORT_SYMBOL(dquot_disable);
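
/*
 * Sketch: remount read-only paths usually suspend rather than fully
 * disable quotas:
 *
 *	dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *
 * which keeps dqopt->files[] pinned (see put_inodes above) so that
 * dquot_resume() can later restart accounting without a fresh
 * quotaon().
 */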
2138
2139 int dquot_quota_off(struct super_block *sb, int type)
2140 {
2141 return dquot_disable(sb, type,
2142 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2143 }
2144 EXPORT_SYMBOL(dquot_quota_off);
2145
2146 /*
2147  * Turn quotas on for a device
2148 */
2149
2150 /*
2151 * Helper function to turn quotas on when we already have the inode of
2152  * the quota file and no quota information is loaded.
2153 */
2154 static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2155 unsigned int flags)
2156 {
2157 struct quota_format_type *fmt = find_quota_format(format_id);
2158 struct super_block *sb = inode->i_sb;
2159 struct quota_info *dqopt = sb_dqopt(sb);
2160 int error;
2161 int oldflags = -1;
2162
2163 if (!fmt)
2164 return -ESRCH;
2165 if (!S_ISREG(inode->i_mode)) {
2166 error = -EACCES;
2167 goto out_fmt;
2168 }
2169 if (IS_RDONLY(inode)) {
2170 error = -EROFS;
2171 goto out_fmt;
2172 }
2173 if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
2174 error = -EINVAL;
2175 goto out_fmt;
2176 }
2177 	/* Usage accounting always has to be enabled... */
2178 if (!(flags & DQUOT_USAGE_ENABLED)) {
2179 error = -EINVAL;
2180 goto out_fmt;
2181 }
2182
2183 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2184 /* As we bypass the pagecache we must now flush all the
2185 		 * dirty data and invalidate caches so that the kernel sees
2186 * changes from userspace. It is not enough to just flush
2187 * the quota file since if blocksize < pagesize, invalidation
2188 * of the cache could fail because of other unrelated dirty
2189 * data */
2190 sync_filesystem(sb);
2191 invalidate_bdev(sb->s_bdev);
2192 }
2193 mutex_lock(&dqopt->dqonoff_mutex);
2194 if (sb_has_quota_loaded(sb, type)) {
2195 error = -EBUSY;
2196 goto out_lock;
2197 }
2198
2199 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2200 /* We don't want quota and atime on quota files (deadlocks
2201 		 * possible). Also nobody should write to the file - we use
2202 * special IO operations which ignore the immutable bit. */
2203 mutex_lock(&inode->i_mutex);
2204 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2205 S_NOQUOTA);
2206 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2207 mutex_unlock(&inode->i_mutex);
2208 /*
2209 * When S_NOQUOTA is set, remove dquot references as no more
2210 * references can be added
2211 */
2212 __dquot_drop(inode);
2213 }
2214
2215 error = -EIO;
2216 dqopt->files[type] = igrab(inode);
2217 if (!dqopt->files[type])
2218 goto out_lock;
2219 error = -EINVAL;
2220 if (!fmt->qf_ops->check_quota_file(sb, type))
2221 goto out_file_init;
2222
2223 dqopt->ops[type] = fmt->qf_ops;
2224 dqopt->info[type].dqi_format = fmt;
2225 dqopt->info[type].dqi_fmt_id = format_id;
2226 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2227 mutex_lock(&dqopt->dqio_mutex);
2228 error = dqopt->ops[type]->read_file_info(sb, type);
2229 if (error < 0) {
2230 mutex_unlock(&dqopt->dqio_mutex);
2231 goto out_file_init;
2232 }
2233 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2234 dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2235 mutex_unlock(&dqopt->dqio_mutex);
2236 spin_lock(&dq_state_lock);
2237 dqopt->flags |= dquot_state_flag(flags, type);
2238 spin_unlock(&dq_state_lock);
2239
2240 add_dquot_ref(sb, type);
2241 mutex_unlock(&dqopt->dqonoff_mutex);
2242
2243 return 0;
2244
2245 out_file_init:
2246 dqopt->files[type] = NULL;
2247 iput(inode);
2248 out_lock:
2249 if (oldflags != -1) {
2250 mutex_lock(&inode->i_mutex);
2251 		/* Set the flags back (in the case of an accidental quotaon()
2252 		 * on the wrong file we don't want to mess up its flags) */
2253 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2254 inode->i_flags |= oldflags;
2255 mutex_unlock(&inode->i_mutex);
2256 }
2257 mutex_unlock(&dqopt->dqonoff_mutex);
2258 out_fmt:
2259 put_quota_format(fmt);
2260
2261 return error;
2262 }
2263
2264 /* Reenable quotas on remount RW */
2265 int dquot_resume(struct super_block *sb, int type)
2266 {
2267 struct quota_info *dqopt = sb_dqopt(sb);
2268 struct inode *inode;
2269 int ret = 0, cnt;
2270 unsigned int flags;
2271
2272 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2273 if (type != -1 && cnt != type)
2274 continue;
2275
2276 mutex_lock(&dqopt->dqonoff_mutex);
2277 if (!sb_has_quota_suspended(sb, cnt)) {
2278 mutex_unlock(&dqopt->dqonoff_mutex);
2279 continue;
2280 }
2281 inode = dqopt->files[cnt];
2282 dqopt->files[cnt] = NULL;
2283 spin_lock(&dq_state_lock);
2284 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2285 DQUOT_LIMITS_ENABLED,
2286 cnt);
2287 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2288 spin_unlock(&dq_state_lock);
2289 mutex_unlock(&dqopt->dqonoff_mutex);
2290
2291 flags = dquot_generic_flag(flags, cnt);
2292 ret = vfs_load_quota_inode(inode, cnt,
2293 dqopt->info[cnt].dqi_fmt_id, flags);
2294 iput(inode);
2295 }
2296
2297 return ret;
2298 }
2299 EXPORT_SYMBOL(dquot_resume);
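
/*
 * Sketch: called from a filesystem's remount path once the superblock
 * becomes writable again (*flags being the remount flags argument):
 *
 *	if (!(*flags & MS_RDONLY))
 *		dquot_resume(sb, -1);
 */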
2300
2301 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2302 struct path *path)
2303 {
2304 int error = security_quota_on(path->dentry);
2305 if (error)
2306 return error;
2307 /* Quota file not on the same filesystem? */
2308 if (path->dentry->d_sb != sb)
2309 error = -EXDEV;
2310 else
2311 error = vfs_load_quota_inode(path->dentry->d_inode, type,
2312 format_id, DQUOT_USAGE_ENABLED |
2313 DQUOT_LIMITS_ENABLED);
2314 return error;
2315 }
2316 EXPORT_SYMBOL(dquot_quota_on);
2317
2318 /*
2319  * More powerful function for turning on quotas, allowing individual
2320  * quota flags to be set
2321 */
2322 int dquot_enable(struct inode *inode, int type, int format_id,
2323 unsigned int flags)
2324 {
2325 int ret = 0;
2326 struct super_block *sb = inode->i_sb;
2327 struct quota_info *dqopt = sb_dqopt(sb);
2328
2329 	/* Unsuspending quotas is done via dquot_resume(), never here */
2330 BUG_ON(flags & DQUOT_SUSPENDED);
2331
2332 if (!flags)
2333 return 0;
2334 /* Just updating flags needed? */
2335 if (sb_has_quota_loaded(sb, type)) {
2336 mutex_lock(&dqopt->dqonoff_mutex);
2337 		/* Now repeat the test reliably under dqonoff_mutex... */
2338 if (!sb_has_quota_loaded(sb, type)) {
2339 mutex_unlock(&dqopt->dqonoff_mutex);
2340 goto load_quota;
2341 }
2342 if (flags & DQUOT_USAGE_ENABLED &&
2343 sb_has_quota_usage_enabled(sb, type)) {
2344 ret = -EBUSY;
2345 goto out_lock;
2346 }
2347 if (flags & DQUOT_LIMITS_ENABLED &&
2348 sb_has_quota_limits_enabled(sb, type)) {
2349 ret = -EBUSY;
2350 goto out_lock;
2351 }
2352 spin_lock(&dq_state_lock);
2353 sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
2354 spin_unlock(&dq_state_lock);
2355 out_lock:
2356 mutex_unlock(&dqopt->dqonoff_mutex);
2357 return ret;
2358 }
2359
2360 load_quota:
2361 return vfs_load_quota_inode(inode, type, format_id, flags);
2362 }
2363 EXPORT_SYMBOL(dquot_enable);
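
/*
 * Sketch (assumed call pattern; format_id supplied by the caller): a
 * filesystem that always accounts usage can later switch on just the
 * limits for one quota type:
 *
 *	ret = dquot_enable(sb_dqopt(sb)->files[type], type,
 *			   format_id, DQUOT_LIMITS_ENABLED);
 */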
2364
2365 /*
2366  * This function is used when a filesystem needs to initialize quotas
2367  * at mount time.
2368 */
2369 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2370 int format_id, int type)
2371 {
2372 struct dentry *dentry;
2373 int error;
2374
2375 mutex_lock(&sb->s_root->d_inode->i_mutex);
2376 dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
2377 mutex_unlock(&sb->s_root->d_inode->i_mutex);
2378 if (IS_ERR(dentry))
2379 return PTR_ERR(dentry);
2380
2381 if (!dentry->d_inode) {
2382 error = -ENOENT;
2383 goto out;
2384 }
2385
2386 error = security_quota_on(dentry);
2387 if (!error)
2388 error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
2389 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2390
2391 out:
2392 dput(dentry);
2393 return error;
2394 }
2395 EXPORT_SYMBOL(dquot_quota_on_mount);
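
/*
 * Sketch: a journalled-quota filesystem typically calls this while
 * mounting, using quota file names parsed from mount options (the
 * "sbi" fields are illustrative):
 *
 *	err = dquot_quota_on_mount(sb, sbi->s_qf_names[type],
 *				   sbi->s_jquota_fmt, type);
 */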
2396
2397 static inline qsize_t qbtos(qsize_t blocks)
2398 {
2399 return blocks << QIF_DQBLKSIZE_BITS;
2400 }
2401
2402 static inline qsize_t stoqb(qsize_t space)
2403 {
2404 return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
2405 }
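
/*
 * Worked example: QIF_DQBLKSIZE_BITS is 10, i.e. quota blocks are
 * 1 KiB, so qbtos(3) == 3072 bytes, while stoqb() rounds up:
 * stoqb(1025) == 2 quota blocks.
 */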
2406
2407 /* Generic routine for getting the common part of the quota structure */
2408 static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2409 {
2410 struct mem_dqblk *dm = &dquot->dq_dqb;
2411
2412 memset(di, 0, sizeof(*di));
2413 di->d_version = FS_DQUOT_VERSION;
2414 di->d_flags = dquot->dq_id.type == USRQUOTA ?
2415 FS_USER_QUOTA : FS_GROUP_QUOTA;
2416 di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
2417
2418 spin_lock(&dq_data_lock);
2419 di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
2420 di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
2421 di->d_ino_hardlimit = dm->dqb_ihardlimit;
2422 di->d_ino_softlimit = dm->dqb_isoftlimit;
2423 di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
2424 di->d_icount = dm->dqb_curinodes;
2425 di->d_btimer = dm->dqb_btime;
2426 di->d_itimer = dm->dqb_itime;
2427 spin_unlock(&dq_data_lock);
2428 }
2429
2430 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2431 struct fs_disk_quota *di)
2432 {
2433 struct dquot *dquot;
2434
2435 dquot = dqget(sb, qid);
2436 if (!dquot)
2437 return -ESRCH;
2438 do_get_dqblk(dquot, di);
2439 dqput(dquot);
2440
2441 return 0;
2442 }
2443 EXPORT_SYMBOL(dquot_get_dqblk);
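
/*
 * Sketch: querying current usage for a group id (sb and gid are the
 * caller's):
 *
 *	struct fs_disk_quota di;
 *	int err = dquot_get_dqblk(sb, make_kqid_gid(gid), &di);
 *
 * On success di.d_bcount is bytes in use (including reservations) and
 * di.d_icount the inode count, as filled in by do_get_dqblk() above.
 */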
2444
2445 #define VFS_FS_DQ_MASK \
2446 (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
2447 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
2448 FS_DQ_BTIMER | FS_DQ_ITIMER)
2449
2450 /* Generic routine for setting the common part of the quota structure */
2451 static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2452 {
2453 struct mem_dqblk *dm = &dquot->dq_dqb;
2454 int check_blim = 0, check_ilim = 0;
2455 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
2456
2457 if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
2458 return -EINVAL;
2459
2460 if (((di->d_fieldmask & FS_DQ_BSOFT) &&
2461 (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
2462 ((di->d_fieldmask & FS_DQ_BHARD) &&
2463 (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
2464 ((di->d_fieldmask & FS_DQ_ISOFT) &&
2465 (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
2466 ((di->d_fieldmask & FS_DQ_IHARD) &&
2467 (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
2468 return -ERANGE;
2469
2470 spin_lock(&dq_data_lock);
2471 if (di->d_fieldmask & FS_DQ_BCOUNT) {
2472 dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
2473 check_blim = 1;
2474 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2475 }
2476
2477 if (di->d_fieldmask & FS_DQ_BSOFT)
2478 dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
2479 if (di->d_fieldmask & FS_DQ_BHARD)
2480 dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
2481 if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
2482 check_blim = 1;
2483 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2484 }
2485
2486 if (di->d_fieldmask & FS_DQ_ICOUNT) {
2487 dm->dqb_curinodes = di->d_icount;
2488 check_ilim = 1;
2489 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2490 }
2491
2492 if (di->d_fieldmask & FS_DQ_ISOFT)
2493 dm->dqb_isoftlimit = di->d_ino_softlimit;
2494 if (di->d_fieldmask & FS_DQ_IHARD)
2495 dm->dqb_ihardlimit = di->d_ino_hardlimit;
2496 if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
2497 check_ilim = 1;
2498 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2499 }
2500
2501 if (di->d_fieldmask & FS_DQ_BTIMER) {
2502 dm->dqb_btime = di->d_btimer;
2503 check_blim = 1;
2504 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2505 }
2506
2507 if (di->d_fieldmask & FS_DQ_ITIMER) {
2508 dm->dqb_itime = di->d_itimer;
2509 check_ilim = 1;
2510 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2511 }
2512
2513 if (check_blim) {
2514 if (!dm->dqb_bsoftlimit ||
2515 dm->dqb_curspace < dm->dqb_bsoftlimit) {
2516 dm->dqb_btime = 0;
2517 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2518 } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
2519 			/* Set grace only if the user hasn't provided their own... */
2520 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2521 }
2522 if (check_ilim) {
2523 if (!dm->dqb_isoftlimit ||
2524 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2525 dm->dqb_itime = 0;
2526 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2527 } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
2528 			/* Set grace only if the user hasn't provided their own... */
2529 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2530 }
2531 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2532 dm->dqb_isoftlimit)
2533 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2534 else
2535 set_bit(DQ_FAKE_B, &dquot->dq_flags);
2536 spin_unlock(&dq_data_lock);
2537 mark_dquot_dirty(dquot);
2538
2539 return 0;
2540 }
2541
2542 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2543 struct fs_disk_quota *di)
2544 {
2545 struct dquot *dquot;
2546 int rc;
2547
2548 dquot = dqget(sb, qid);
2549 if (!dquot) {
2550 rc = -ESRCH;
2551 goto out;
2552 }
2553 rc = do_set_dqblk(dquot, di);
2554 dqput(dquot);
2555 out:
2556 return rc;
2557 }
2558 EXPORT_SYMBOL(dquot_set_dqblk);
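
/*
 * Sketch: raising only the block soft limit for a user, with
 * d_fieldmask leaving every other field untouched (uid is the
 * caller's kuid_t; the limit is in 1 KiB quota blocks):
 *
 *	struct fs_disk_quota di = {
 *		.d_version = FS_DQUOT_VERSION,
 *		.d_fieldmask = FS_DQ_BSOFT,
 *		.d_blk_softlimit = 1024,
 *	};
 *	int err = dquot_set_dqblk(sb, make_kqid_uid(uid), &di);
 */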
2559
2560 /* Generic routine for getting common part of quota file information */
2561 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2562 {
2563 struct mem_dqinfo *mi;
2564
2565 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2566 if (!sb_has_quota_active(sb, type)) {
2567 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2568 return -ESRCH;
2569 }
2570 mi = sb_dqopt(sb)->info + type;
2571 spin_lock(&dq_data_lock);
2572 ii->dqi_bgrace = mi->dqi_bgrace;
2573 ii->dqi_igrace = mi->dqi_igrace;
2574 ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
2575 ii->dqi_valid = IIF_ALL;
2576 spin_unlock(&dq_data_lock);
2577 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2578 return 0;
2579 }
2580 EXPORT_SYMBOL(dquot_get_dqinfo);
2581
2582 /* Generic routine for setting common part of quota file information */
2583 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2584 {
2585 struct mem_dqinfo *mi;
2586 int err = 0;
2587
2588 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2589 if (!sb_has_quota_active(sb, type)) {
2590 err = -ESRCH;
2591 goto out;
2592 }
2593 mi = sb_dqopt(sb)->info + type;
2594 spin_lock(&dq_data_lock);
2595 if (ii->dqi_valid & IIF_BGRACE)
2596 mi->dqi_bgrace = ii->dqi_bgrace;
2597 if (ii->dqi_valid & IIF_IGRACE)
2598 mi->dqi_igrace = ii->dqi_igrace;
2599 if (ii->dqi_valid & IIF_FLAGS)
2600 mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
2601 (ii->dqi_flags & DQF_SETINFO_MASK);
2602 spin_unlock(&dq_data_lock);
2603 mark_info_dirty(sb, type);
2604 /* Force write to disk */
2605 sb->dq_op->write_info(sb, type);
2606 out:
2607 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2608 return err;
2609 }
2610 EXPORT_SYMBOL(dquot_set_dqinfo);
2611
2612 const struct quotactl_ops dquot_quotactl_ops = {
2613 .quota_on = dquot_quota_on,
2614 .quota_off = dquot_quota_off,
2615 .quota_sync = dquot_quota_sync,
2616 .get_info = dquot_get_dqinfo,
2617 .set_info = dquot_set_dqinfo,
2618 .get_dqblk = dquot_get_dqblk,
2619 .set_dqblk = dquot_set_dqblk
2620 };
2621 EXPORT_SYMBOL(dquot_quotactl_ops);
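
/*
 * A filesystem using the generic implementation wires this up next to
 * dquot_operations when filling in its super_block:
 *
 *	sb->s_qcop = &dquot_quotactl_ops;
 */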
2622
2623 static int do_proc_dqstats(struct ctl_table *table, int write,
2624 void __user *buffer, size_t *lenp, loff_t *ppos)
2625 {
2626 unsigned int type = (int *)table->data - dqstats.stat;
2627
2628 /* Update global table */
2629 dqstats.stat[type] =
2630 percpu_counter_sum_positive(&dqstats.counter[type]);
2631 return proc_dointvec(table, write, buffer, lenp, ppos);
2632 }
2633
2634 static struct ctl_table fs_dqstats_table[] = {
2635 {
2636 .procname = "lookups",
2637 .data = &dqstats.stat[DQST_LOOKUPS],
2638 .maxlen = sizeof(int),
2639 .mode = 0444,
2640 .proc_handler = do_proc_dqstats,
2641 },
2642 {
2643 .procname = "drops",
2644 .data = &dqstats.stat[DQST_DROPS],
2645 .maxlen = sizeof(int),
2646 .mode = 0444,
2647 .proc_handler = do_proc_dqstats,
2648 },
2649 {
2650 .procname = "reads",
2651 .data = &dqstats.stat[DQST_READS],
2652 .maxlen = sizeof(int),
2653 .mode = 0444,
2654 .proc_handler = do_proc_dqstats,
2655 },
2656 {
2657 .procname = "writes",
2658 .data = &dqstats.stat[DQST_WRITES],
2659 .maxlen = sizeof(int),
2660 .mode = 0444,
2661 .proc_handler = do_proc_dqstats,
2662 },
2663 {
2664 .procname = "cache_hits",
2665 .data = &dqstats.stat[DQST_CACHE_HITS],
2666 .maxlen = sizeof(int),
2667 .mode = 0444,
2668 .proc_handler = do_proc_dqstats,
2669 },
2670 {
2671 .procname = "allocated_dquots",
2672 .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
2673 .maxlen = sizeof(int),
2674 .mode = 0444,
2675 .proc_handler = do_proc_dqstats,
2676 },
2677 {
2678 .procname = "free_dquots",
2679 .data = &dqstats.stat[DQST_FREE_DQUOTS],
2680 .maxlen = sizeof(int),
2681 .mode = 0444,
2682 .proc_handler = do_proc_dqstats,
2683 },
2684 {
2685 .procname = "syncs",
2686 .data = &dqstats.stat[DQST_SYNCS],
2687 .maxlen = sizeof(int),
2688 .mode = 0444,
2689 .proc_handler = do_proc_dqstats,
2690 },
2691 #ifdef CONFIG_PRINT_QUOTA_WARNING
2692 {
2693 .procname = "warnings",
2694 .data = &flag_print_warnings,
2695 .maxlen = sizeof(int),
2696 .mode = 0644,
2697 .proc_handler = proc_dointvec,
2698 },
2699 #endif
2700 { },
2701 };
2702
2703 static struct ctl_table fs_table[] = {
2704 {
2705 .procname = "quota",
2706 .mode = 0555,
2707 .child = fs_dqstats_table,
2708 },
2709 { },
2710 };
2711
2712 static struct ctl_table sys_table[] = {
2713 {
2714 .procname = "fs",
2715 .mode = 0555,
2716 .child = fs_table,
2717 },
2718 { },
2719 };
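
/*
 * The tables above publish the counters as /proc/sys/fs/quota/<name>
 * (lookups, drops, reads, writes, cache_hits, allocated_dquots,
 * free_dquots, syncs), all read-only; "warnings" is present and
 * writable only when CONFIG_PRINT_QUOTA_WARNING is set.
 */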
2720
2721 static int __init dquot_init(void)
2722 {
2723 int i, ret;
2724 unsigned long nr_hash, order;
2725
2726 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2727
2728 register_sysctl_table(sys_table);
2729
2730 dquot_cachep = kmem_cache_create("dquot",
2731 sizeof(struct dquot), sizeof(unsigned long) * 4,
2732 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2733 SLAB_MEM_SPREAD|SLAB_PANIC),
2734 NULL);
2735
2736 order = 0;
2737 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2738 if (!dquot_hash)
2739 panic("Cannot create dquot hash table");
2740
2741 for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2742 ret = percpu_counter_init(&dqstats.counter[i], 0);
2743 if (ret)
2744 panic("Cannot create dquot stat counters");
2745 }
2746
2747 	/* Find the largest power-of-two number of hlist_heads fitting the allocation */
2748 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2749 dq_hash_bits = 0;
2750 do {
2751 dq_hash_bits++;
2752 } while (nr_hash >> dq_hash_bits);
2753 dq_hash_bits--;
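	/*
	 * Example: with 4 KiB pages, 8-byte hlist_heads and order 0,
	 * nr_hash starts at 512; the loop above then exits with
	 * dq_hash_bits == 9, giving a 512-bucket table below
	 * (dq_hash_mask == 511).
	 */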
2754
2755 nr_hash = 1UL << dq_hash_bits;
2756 dq_hash_mask = nr_hash - 1;
2757 for (i = 0; i < nr_hash; i++)
2758 INIT_HLIST_HEAD(dquot_hash + i);
2759
2760 printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2761 nr_hash, order, (PAGE_SIZE << order));
2762
2763 register_shrinker(&dqcache_shrinker);
2764
2765 return 0;
2766 }
2767 module_init(dquot_init);