fs/quota/dquot.c
1 /*
2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
3 * is implemented using the BSD system call interface as the means of
4 * communication with the user level. This file contains the generic routines
5 * called by the different filesystems on allocation of an inode or block.
6 * These routines take care of the administration needed to have a consistent
7 * diskquota tracking system. The ideas of both user and group quotas are based
8 * on the Melbourne quota system as used on BSD derived systems. The internal
9 * implementation is based on one of the several variants of the LINUX
10 * inode-subsystem with added complexity of the diskquota system.
11 *
12 * Author: Marco van Wieringen <mvw@planets.elm.net>
13 *
14 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
15 *
16 * Revised list management to avoid races
17 * -- Bill Hawes, <whawes@star.net>, 9/98
18 *
19 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20 * As a consequence the locking was moved from dquot_decr_...(),
21 * dquot_incr_...() to calling functions.
22 * invalidate_dquots() now writes modified dquots.
23 * Serialized quota_off() and quota_on() for mount point.
24 * Fixed a few bugs in grow_dquots().
25 * Fixed deadlock in write_dquot() - we no longer account quotas on
26 * quota files
27 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
28 * add_dquot_ref() restarts after blocking
29 * Added check for bogus uid and fixed check for group in quotactl.
30 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
31 *
32 * Used struct list_head instead of own list struct
33 * Invalidation of referenced dquots is no longer possible
34 * Improved free_dquots list management
35 * Quota and i_blocks are now updated in one place to avoid races
36 * Warnings are now delayed so we won't block in critical section
37 * Write updated not to require dquot lock
38 * Jan Kara, <jack@suse.cz>, 9/2000
39 *
40 * Added dynamic quota structure allocation
41 * Jan Kara <jack@suse.cz> 12/2000
42 *
43 * Rewritten quota interface. Implemented new quota format and
44 * formats registering.
45 * Jan Kara, <jack@suse.cz>, 2001,2002
46 *
47 * New SMP locking.
48 * Jan Kara, <jack@suse.cz>, 10/2002
49 *
50 * Added journalled quota support, fix lock inversion problems
51 * Jan Kara, <jack@suse.cz>, 2003,2004
52 *
53 * (C) Copyright 1994 - 1997 Marco van Wieringen
54 */
55
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
58 #include <linux/fs.h>
59 #include <linux/mount.h>
60 #include <linux/mm.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/sched.h>
75 #include <linux/cred.h>
76 #include <linux/kmod.h>
77 #include <linux/namei.h>
78 #include <linux/capability.h>
79 #include <linux/quotaops.h>
80 #include "../internal.h" /* ugh */
81
82 #include <linux/uaccess.h>
83
84 /*
85 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
86 * and quota formats.
87 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
88 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
89 * i_blocks and i_bytes updates themselves are guarded by i_lock acquired directly
90 * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
91 * modifications of quota state (on quotaon and quotaoff) and readers who care
92 * about latest values take it as well.
93 *
94 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
95 * dq_list_lock > dq_state_lock
96 *
97 * Note that some things (e.g. sb pointer, type, id) don't change during
98 * the life of the dquot structure and so need not be protected by a lock
99 *
100 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
101 * An operation reading the pointers needs srcu_read_lock(&dquot_srcu), and
102 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
103 * inode and before dropping dquot references to avoid use of dquots after
104 * they are freed. dq_data_lock is used to serialize the pointer setting and
105 * clearing operations.
106 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
107 * inode is a quota file). Functions adding pointers from inode to dquots have
108 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
109 * have to do all pointer modifications before dropping dq_data_lock. This makes
110 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
111 * then drops all pointers to dquots from an inode.
112 *
113 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
114 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
115 * Currently dquot is locked only when it is being read to memory (or space for
116 * it is being allocated) on the first dqget() and when it is being released on
117 * the last dqput(). The allocation and release operations are serialized by
118 * the dq_lock and by checking the use count in dquot_release(). Write
119 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
120 * spinlock to internal buffers before writing.
121 *
122 * Lock ordering (including related VFS locks) is the following:
123 * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
124 */
125
126 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
127 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
128 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
129 EXPORT_SYMBOL(dq_data_lock);
130 DEFINE_STATIC_SRCU(dquot_srcu);
131
132 void __quota_error(struct super_block *sb, const char *func,
133 const char *fmt, ...)
134 {
135 if (printk_ratelimit()) {
136 va_list args;
137 struct va_format vaf;
138
139 va_start(args, fmt);
140
141 vaf.fmt = fmt;
142 vaf.va = &args;
143
144 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
145 sb->s_id, func, &vaf);
146
147 va_end(args);
148 }
149 }
150 EXPORT_SYMBOL(__quota_error);
151
152 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
153 static char *quotatypes[] = INITQFNAMES;
154 #endif
155 static struct quota_format_type *quota_formats; /* List of registered formats */
156 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
157
158 /* SLAB cache for dquot structures */
159 static struct kmem_cache *dquot_cachep;
160
161 int register_quota_format(struct quota_format_type *fmt)
162 {
163 spin_lock(&dq_list_lock);
164 fmt->qf_next = quota_formats;
165 quota_formats = fmt;
166 spin_unlock(&dq_list_lock);
167 return 0;
168 }
169 EXPORT_SYMBOL(register_quota_format);
170
171 void unregister_quota_format(struct quota_format_type *fmt)
172 {
173 struct quota_format_type **actqf;
174
175 spin_lock(&dq_list_lock);
176 for (actqf = &quota_formats; *actqf && *actqf != fmt;
177 actqf = &(*actqf)->qf_next)
178 ;
179 if (*actqf)
180 *actqf = (*actqf)->qf_next;
181 spin_unlock(&dq_list_lock);
182 }
183 EXPORT_SYMBOL(unregister_quota_format);
184
185 static struct quota_format_type *find_quota_format(int id)
186 {
187 struct quota_format_type *actqf;
188
189 spin_lock(&dq_list_lock);
190 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
191 actqf = actqf->qf_next)
192 ;
193 if (!actqf || !try_module_get(actqf->qf_owner)) {
194 int qm;
195
196 spin_unlock(&dq_list_lock);
197
198 for (qm = 0; module_names[qm].qm_fmt_id &&
199 module_names[qm].qm_fmt_id != id; qm++)
200 ;
201 if (!module_names[qm].qm_fmt_id ||
202 request_module(module_names[qm].qm_mod_name))
203 return NULL;
204
205 spin_lock(&dq_list_lock);
206 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
207 actqf = actqf->qf_next)
208 ;
209 if (actqf && !try_module_get(actqf->qf_owner))
210 actqf = NULL;
211 }
212 spin_unlock(&dq_list_lock);
213 return actqf;
214 }
215
216 static void put_quota_format(struct quota_format_type *fmt)
217 {
218 module_put(fmt->qf_owner);
219 }
220
221 /*
222 * Dquot List Management:
223 * The quota code uses three lists for dquot management: the inuse_list,
224 * free_dquots, and dquot_hash[] array. A single dquot structure may be
225 * on all three lists, depending on its current state.
226 *
227 * All dquots are placed at the end of inuse_list when first created, and this
228 * list is used for invalidate operation, which must look at every dquot.
229 *
230 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
231 * and this list is searched whenever we need an available dquot. Dquots are
232 * removed from the list as soon as they are used again, and
233 * dqstats.free_dquots gives the number of dquots on the list. When a
234 * dquot is invalidated, it's completely released from memory.
235 *
236 * Dquots with a specific identity (device, type and id) are placed on
237 * one of the dquot_hash[] hash chains. This provides an efficient search
238 * mechanism to locate a specific dquot.
239 */
240
241 static LIST_HEAD(inuse_list);
242 static LIST_HEAD(free_dquots);
243 static unsigned int dq_hash_bits, dq_hash_mask;
244 static struct hlist_head *dquot_hash;
245
246 struct dqstats dqstats;
247 EXPORT_SYMBOL(dqstats);
248
249 static qsize_t inode_get_rsv_space(struct inode *inode);
250 static int __dquot_initialize(struct inode *inode, int type);
251
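/*
 * Hash the (superblock, id, type) triple to an index into dquot_hash[].
 */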
252 static inline unsigned int
253 hashfn(const struct super_block *sb, struct kqid qid)
254 {
255 unsigned int id = from_kqid(&init_user_ns, qid);
256 int type = qid.type;
257 unsigned long tmp;
258
259 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
260 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
261 }
262
263 /*
264 * Following list functions expect dq_list_lock to be held
265 */
266 static inline void insert_dquot_hash(struct dquot *dquot)
267 {
268 struct hlist_head *head;
269 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
270 hlist_add_head(&dquot->dq_hash, head);
271 }
272
273 static inline void remove_dquot_hash(struct dquot *dquot)
274 {
275 hlist_del_init(&dquot->dq_hash);
276 }
277
278 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
279 struct kqid qid)
280 {
281 struct hlist_node *node;
282 struct dquot *dquot;
283
284 hlist_for_each(node, dquot_hash + hashent) {
285 dquot = hlist_entry(node, struct dquot, dq_hash);
286 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
287 return dquot;
288 }
289 return NULL;
290 }
291
292 /* Add a dquot to the tail of the free list */
293 static inline void put_dquot_last(struct dquot *dquot)
294 {
295 list_add_tail(&dquot->dq_free, &free_dquots);
296 dqstats_inc(DQST_FREE_DQUOTS);
297 }
298
299 static inline void remove_free_dquot(struct dquot *dquot)
300 {
301 if (list_empty(&dquot->dq_free))
302 return;
303 list_del_init(&dquot->dq_free);
304 dqstats_dec(DQST_FREE_DQUOTS);
305 }
306
307 static inline void put_inuse(struct dquot *dquot)
308 {
309 /* We add to the back of the inuse list so we don't have to restart
310 * the traversal when we block */
311 list_add_tail(&dquot->dq_inuse, &inuse_list);
312 dqstats_inc(DQST_ALLOC_DQUOTS);
313 }
314
315 static inline void remove_inuse(struct dquot *dquot)
316 {
317 dqstats_dec(DQST_ALLOC_DQUOTS);
318 list_del(&dquot->dq_inuse);
319 }
320 /*
321 * End of list functions needing dq_list_lock
322 */
323
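/*
 * Wait until dq_lock is free; pairs with dquot_acquire()/dquot_release(),
 * which hold dq_lock while the dquot is being read in or torn down.
 */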
324 static void wait_on_dquot(struct dquot *dquot)
325 {
326 mutex_lock(&dquot->dq_lock);
327 mutex_unlock(&dquot->dq_lock);
328 }
329
330 static inline int dquot_dirty(struct dquot *dquot)
331 {
332 return test_bit(DQ_MOD_B, &dquot->dq_flags);
333 }
334
335 static inline int mark_dquot_dirty(struct dquot *dquot)
336 {
337 return dquot->dq_sb->dq_op->mark_dirty(dquot);
338 }
339
340 /* Mark dquot dirty in atomic manner, and return its old dirty flag state */
341 int dquot_mark_dquot_dirty(struct dquot *dquot)
342 {
343 int ret = 1;
344
345 /* If quota is dirty already, we don't have to acquire dq_list_lock */
346 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
347 return 1;
348
349 spin_lock(&dq_list_lock);
350 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
351 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
352 info[dquot->dq_id.type].dqi_dirty_list);
353 ret = 0;
354 }
355 spin_unlock(&dq_list_lock);
356 return ret;
357 }
358 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
359
360 /* Dirtify all the dquots - this can block when journalling */
361 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
362 {
363 int ret, err, cnt;
364
365 ret = err = 0;
366 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
367 if (dquot[cnt])
368 /* Even in case of error we have to continue */
369 ret = mark_dquot_dirty(dquot[cnt]);
370 if (!err)
371 err = ret;
372 }
373 return err;
374 }
375
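/* Drop references to all dquots in the array; dqput() ignores NULL entries */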
376 static inline void dqput_all(struct dquot **dquot)
377 {
378 unsigned int cnt;
379
380 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
381 dqput(dquot[cnt]);
382 }
383
384 /* This function needs dq_list_lock */
385 static inline int clear_dquot_dirty(struct dquot *dquot)
386 {
387 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
388 return 0;
389 list_del_init(&dquot->dq_dirty);
390 return 1;
391 }
392
393 void mark_info_dirty(struct super_block *sb, int type)
394 {
395 set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
396 }
397 EXPORT_SYMBOL(mark_info_dirty);
398
399 /*
400 * Read dquot from disk and alloc space for it
401 */
402
403 int dquot_acquire(struct dquot *dquot)
404 {
405 int ret = 0, ret2 = 0;
406 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
407
408 mutex_lock(&dquot->dq_lock);
409 mutex_lock(&dqopt->dqio_mutex);
410 if (!test_bit(DQ_READ_B, &dquot->dq_flags))
411 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
412 if (ret < 0)
413 goto out_iolock;
414 /* Make sure flags update is visible after dquot has been filled */
415 smp_mb__before_atomic();
416 set_bit(DQ_READ_B, &dquot->dq_flags);
417 /* Instantiate dquot if needed */
418 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
419 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
420 /* Write the info if needed */
421 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
422 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
423 dquot->dq_sb, dquot->dq_id.type);
424 }
425 if (ret < 0)
426 goto out_iolock;
427 if (ret2 < 0) {
428 ret = ret2;
429 goto out_iolock;
430 }
431 }
432 /*
433 * Make sure flags update is visible after on-disk struct has been
434 * allocated. Paired with smp_rmb() in dqget().
435 */
436 smp_mb__before_atomic();
437 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
438 out_iolock:
439 mutex_unlock(&dqopt->dqio_mutex);
440 mutex_unlock(&dquot->dq_lock);
441 return ret;
442 }
443 EXPORT_SYMBOL(dquot_acquire);
444
445 /*
446 * Write dquot to disk
447 */
448 int dquot_commit(struct dquot *dquot)
449 {
450 int ret = 0;
451 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
452
453 mutex_lock(&dqopt->dqio_mutex);
454 spin_lock(&dq_list_lock);
455 if (!clear_dquot_dirty(dquot)) {
456 spin_unlock(&dq_list_lock);
457 goto out_sem;
458 }
459 spin_unlock(&dq_list_lock);
460 /* A dquot can be inactive only if there was an error during read/init
461 * => we had better not write it */
462 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
463 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
464 else
465 ret = -EIO;
466 out_sem:
467 mutex_unlock(&dqopt->dqio_mutex);
468 return ret;
469 }
470 EXPORT_SYMBOL(dquot_commit);
471
472 /*
473 * Release dquot
474 */
475 int dquot_release(struct dquot *dquot)
476 {
477 int ret = 0, ret2 = 0;
478 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
479
480 mutex_lock(&dquot->dq_lock);
481 /* Check whether we are not racing with some other dqget() */
482 if (atomic_read(&dquot->dq_count) > 1)
483 goto out_dqlock;
484 mutex_lock(&dqopt->dqio_mutex);
485 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
486 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
487 /* Write the info */
488 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
489 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
490 dquot->dq_sb, dquot->dq_id.type);
491 }
492 if (ret >= 0)
493 ret = ret2;
494 }
495 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
496 mutex_unlock(&dqopt->dqio_mutex);
497 out_dqlock:
498 mutex_unlock(&dquot->dq_lock);
499 return ret;
500 }
501 EXPORT_SYMBOL(dquot_release);
502
503 void dquot_destroy(struct dquot *dquot)
504 {
505 kmem_cache_free(dquot_cachep, dquot);
506 }
507 EXPORT_SYMBOL(dquot_destroy);
508
509 static inline void do_destroy_dquot(struct dquot *dquot)
510 {
511 dquot->dq_sb->dq_op->destroy_dquot(dquot);
512 }
513
514 /* Invalidate all dquots on the list. Note that this function is called after
515 * quota is disabled and pointers from inodes removed so there cannot be new
516 * quota users. There can still be some users of quotas due to inodes being
517 * just deleted or pruned by prune_icache() (those are not attached to any
518 * list) or a parallel quotactl call. We have to wait for such users.
519 */
520 static void invalidate_dquots(struct super_block *sb, int type)
521 {
522 struct dquot *dquot, *tmp;
523
524 restart:
525 spin_lock(&dq_list_lock);
526 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
527 if (dquot->dq_sb != sb)
528 continue;
529 if (dquot->dq_id.type != type)
530 continue;
531 /* Wait for dquot users */
532 if (atomic_read(&dquot->dq_count)) {
533 DEFINE_WAIT(wait);
534
535 dqgrab(dquot);
536 prepare_to_wait(&dquot->dq_wait_unused, &wait,
537 TASK_UNINTERRUPTIBLE);
538 spin_unlock(&dq_list_lock);
539 /* Once dqput() wakes us up, we know it's time to free
540 * the dquot.
541 * IMPORTANT: we rely on the fact that there is always
542 * at most one process waiting for dquot to free.
543 * Otherwise dq_count would be > 1 and we would never
544 * wake up.
545 */
546 if (atomic_read(&dquot->dq_count) > 1)
547 schedule();
548 finish_wait(&dquot->dq_wait_unused, &wait);
549 dqput(dquot);
550 /* At this moment the dquot need not exist (it could have
551 * been reclaimed by prune_dqcache()). Hence we must
552 * restart. */
553 goto restart;
554 }
555 /*
556 * The dquot now has no users and it has been written out on the last
557 * dqput()
558 */
559 remove_dquot_hash(dquot);
560 remove_free_dquot(dquot);
561 remove_inuse(dquot);
562 do_destroy_dquot(dquot);
563 }
564 spin_unlock(&dq_list_lock);
565 }
566
567 /* Call callback for every active dquot on given filesystem */
568 int dquot_scan_active(struct super_block *sb,
569 int (*fn)(struct dquot *dquot, unsigned long priv),
570 unsigned long priv)
571 {
572 struct dquot *dquot, *old_dquot = NULL;
573 int ret = 0;
574
575 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
576
577 spin_lock(&dq_list_lock);
578 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
579 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
580 continue;
581 if (dquot->dq_sb != sb)
582 continue;
583 /* Now we have active dquot so we can just increase use count */
584 atomic_inc(&dquot->dq_count);
585 spin_unlock(&dq_list_lock);
586 dqstats_inc(DQST_LOOKUPS);
587 dqput(old_dquot);
588 old_dquot = dquot;
589 /*
590 * ->release_dquot() can be racing with us. Our reference
591 * protects us from new calls to it so just wait for any
592 * outstanding call and recheck the DQ_ACTIVE_B after that.
593 */
594 wait_on_dquot(dquot);
595 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
596 ret = fn(dquot, priv);
597 if (ret < 0)
598 goto out;
599 }
600 spin_lock(&dq_list_lock);
601 /* We are safe to continue now because our dquot could not
602 * be moved out of the inuse list while we hold the reference */
603 }
604 spin_unlock(&dq_list_lock);
605 out:
606 dqput(old_dquot);
607 return ret;
608 }
609 EXPORT_SYMBOL(dquot_scan_active);
610
611 /* Write all dquot structures to quota files */
612 int dquot_writeback_dquots(struct super_block *sb, int type)
613 {
614 struct list_head *dirty;
615 struct dquot *dquot;
616 struct quota_info *dqopt = sb_dqopt(sb);
617 int cnt;
618 int err, ret = 0;
619
620 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
621
622 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
623 if (type != -1 && cnt != type)
624 continue;
625 if (!sb_has_quota_active(sb, cnt))
626 continue;
627 spin_lock(&dq_list_lock);
628 dirty = &dqopt->info[cnt].dqi_dirty_list;
629 while (!list_empty(dirty)) {
630 dquot = list_first_entry(dirty, struct dquot,
631 dq_dirty);
632 /* Only a bad dquot can be dirty and inactive... */
633 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
634 clear_dquot_dirty(dquot);
635 continue;
636 }
637 /* Now we have an active dquot to which someone is
638 * holding a reference so we can safely just increase
639 * the use count */
640 dqgrab(dquot);
641 spin_unlock(&dq_list_lock);
642 dqstats_inc(DQST_LOOKUPS);
643 err = sb->dq_op->write_dquot(dquot);
644 if (!ret && err)
645 ret = err;
646 dqput(dquot);
647 spin_lock(&dq_list_lock);
648 }
649 spin_unlock(&dq_list_lock);
650 }
651
652 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
653 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
654 && info_dirty(&dqopt->info[cnt]))
655 sb->dq_op->write_info(sb, cnt);
656 dqstats_inc(DQST_SYNCS);
657
658 return ret;
659 }
660 EXPORT_SYMBOL(dquot_writeback_dquots);
661
662 /* Write all dquot structures to disk and make them visible from userspace */
663 int dquot_quota_sync(struct super_block *sb, int type)
664 {
665 struct quota_info *dqopt = sb_dqopt(sb);
666 int cnt;
667 int ret;
668
669 ret = dquot_writeback_dquots(sb, type);
670 if (ret)
671 return ret;
672 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
673 return 0;
674
675 /* This is not very clever (or fast) but currently I don't know of
676 * any other simple way of getting quota data to disk and we must get
677 * them there for them to be visible to userspace... */
678 if (sb->s_op->sync_fs)
679 sb->s_op->sync_fs(sb, 1);
680 sync_blockdev(sb->s_bdev);
681
682 /*
683 * Now that everything is written we can discard the pagecache so
684 * that userspace sees the changes.
685 */
686 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
687 if (type != -1 && cnt != type)
688 continue;
689 if (!sb_has_quota_active(sb, cnt))
690 continue;
691 inode_lock(dqopt->files[cnt]);
692 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
693 inode_unlock(dqopt->files[cnt]);
694 }
695
696 return 0;
697 }
698 EXPORT_SYMBOL(dquot_quota_sync);
699
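/*
 * Shrinker callbacks: under memory pressure, free completely unused dquots
 * from the free_dquots list and report how many are reclaimable.
 */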
700 static unsigned long
701 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
702 {
703 struct list_head *head;
704 struct dquot *dquot;
705 unsigned long freed = 0;
706
707 spin_lock(&dq_list_lock);
708 head = free_dquots.prev;
709 while (head != &free_dquots && sc->nr_to_scan) {
710 dquot = list_entry(head, struct dquot, dq_free);
711 remove_dquot_hash(dquot);
712 remove_free_dquot(dquot);
713 remove_inuse(dquot);
714 do_destroy_dquot(dquot);
715 sc->nr_to_scan--;
716 freed++;
717 head = free_dquots.prev;
718 }
719 spin_unlock(&dq_list_lock);
720 return freed;
721 }
722
723 static unsigned long
724 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
725 {
726 return vfs_pressure_ratio(
727 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
728 }
729
730 static struct shrinker dqcache_shrinker = {
731 .count_objects = dqcache_shrink_count,
732 .scan_objects = dqcache_shrink_scan,
733 .seeks = DEFAULT_SEEKS,
734 };
735
736 /*
737 * Put reference to dquot
738 */
739 void dqput(struct dquot *dquot)
740 {
741 int ret;
742
743 if (!dquot)
744 return;
745 #ifdef CONFIG_QUOTA_DEBUG
746 if (!atomic_read(&dquot->dq_count)) {
747 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
748 quotatypes[dquot->dq_id.type],
749 from_kqid(&init_user_ns, dquot->dq_id));
750 BUG();
751 }
752 #endif
753 dqstats_inc(DQST_DROPS);
754 we_slept:
755 spin_lock(&dq_list_lock);
756 if (atomic_read(&dquot->dq_count) > 1) {
757 /* We have more than one user... nothing to do */
758 atomic_dec(&dquot->dq_count);
759 /* Releasing dquot during quotaoff phase? */
760 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
761 atomic_read(&dquot->dq_count) == 1)
762 wake_up(&dquot->dq_wait_unused);
763 spin_unlock(&dq_list_lock);
764 return;
765 }
766 /* Need to release dquot? */
767 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
768 spin_unlock(&dq_list_lock);
769 /* Commit dquot before releasing */
770 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
771 if (ret < 0) {
772 quota_error(dquot->dq_sb, "Can't write quota structure"
773 " (error %d). Quota may get out of sync!",
774 ret);
775 /*
776 * We clear dirty bit anyway, so that we avoid
777 * infinite loop here
778 */
779 spin_lock(&dq_list_lock);
780 clear_dquot_dirty(dquot);
781 spin_unlock(&dq_list_lock);
782 }
783 goto we_slept;
784 }
785 /* Clear flag in case dquot was inactive (something bad happened) */
786 clear_dquot_dirty(dquot);
787 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
788 spin_unlock(&dq_list_lock);
789 dquot->dq_sb->dq_op->release_dquot(dquot);
790 goto we_slept;
791 }
792 atomic_dec(&dquot->dq_count);
793 #ifdef CONFIG_QUOTA_DEBUG
794 /* sanity check */
795 BUG_ON(!list_empty(&dquot->dq_free));
796 #endif
797 put_dquot_last(dquot);
798 spin_unlock(&dq_list_lock);
799 }
800 EXPORT_SYMBOL(dqput);
801
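/*
 * Default ->alloc_dquot operation: allocate a zeroed dquot from the slab
 * cache. GFP_NOFS is used since this can be called from filesystem paths
 * where recursing into fs reclaim is not allowed.
 */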
802 struct dquot *dquot_alloc(struct super_block *sb, int type)
803 {
804 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
805 }
806 EXPORT_SYMBOL(dquot_alloc);
807
808 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
809 {
810 struct dquot *dquot;
811
812 dquot = sb->dq_op->alloc_dquot(sb, type);
813 if (!dquot)
814 return NULL;
815
816 mutex_init(&dquot->dq_lock);
817 INIT_LIST_HEAD(&dquot->dq_free);
818 INIT_LIST_HEAD(&dquot->dq_inuse);
819 INIT_HLIST_NODE(&dquot->dq_hash);
820 INIT_LIST_HEAD(&dquot->dq_dirty);
821 init_waitqueue_head(&dquot->dq_wait_unused);
822 dquot->dq_sb = sb;
823 dquot->dq_id = make_kqid_invalid(type);
824 atomic_set(&dquot->dq_count, 1);
825
826 return dquot;
827 }
828
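/*
 * A typical caller pattern (sketch) for the reference counting implemented
 * by dqget()/dqput() below:
 *
 *	dquot = dqget(sb, make_kqid_uid(uid));
 *	if (IS_ERR(dquot))
 *		return PTR_ERR(dquot);
 *	...
 *	dqput(dquot);
 */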
829 /*
830 * Get reference to dquot
831 *
832 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
833 * destroying our dquot by:
834 * a) checking for quota flags under dq_list_lock and
835 * b) getting a reference to dquot before we release dq_list_lock
836 */
837 struct dquot *dqget(struct super_block *sb, struct kqid qid)
838 {
839 unsigned int hashent = hashfn(sb, qid);
840 struct dquot *dquot, *empty = NULL;
841
842 if (!qid_has_mapping(sb->s_user_ns, qid))
843 return ERR_PTR(-EINVAL);
844
845 if (!sb_has_quota_active(sb, qid.type))
846 return ERR_PTR(-ESRCH);
847 we_slept:
848 spin_lock(&dq_list_lock);
849 spin_lock(&dq_state_lock);
850 if (!sb_has_quota_active(sb, qid.type)) {
851 spin_unlock(&dq_state_lock);
852 spin_unlock(&dq_list_lock);
853 dquot = ERR_PTR(-ESRCH);
854 goto out;
855 }
856 spin_unlock(&dq_state_lock);
857
858 dquot = find_dquot(hashent, sb, qid);
859 if (!dquot) {
860 if (!empty) {
861 spin_unlock(&dq_list_lock);
862 empty = get_empty_dquot(sb, qid.type);
863 if (!empty)
864 schedule(); /* Try to wait for a moment... */
865 goto we_slept;
866 }
867 dquot = empty;
868 empty = NULL;
869 dquot->dq_id = qid;
870 /* all dquots go on the inuse_list */
871 put_inuse(dquot);
872 /* hash it first so it can be found */
873 insert_dquot_hash(dquot);
874 spin_unlock(&dq_list_lock);
875 dqstats_inc(DQST_LOOKUPS);
876 } else {
877 if (!atomic_read(&dquot->dq_count))
878 remove_free_dquot(dquot);
879 atomic_inc(&dquot->dq_count);
880 spin_unlock(&dq_list_lock);
881 dqstats_inc(DQST_CACHE_HITS);
882 dqstats_inc(DQST_LOOKUPS);
883 }
884 /* Wait for dq_lock - after this we know that either dquot_release() is
885 * already finished or it will be canceled due to the dq_count > 1 test */
886 wait_on_dquot(dquot);
887 /* Read the dquot / allocate space in quota file */
888 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
889 int err;
890
891 err = sb->dq_op->acquire_dquot(dquot);
892 if (err < 0) {
893 dqput(dquot);
894 dquot = ERR_PTR(err);
895 goto out;
896 }
897 }
898 /*
899 * Make sure following reads see filled structure - paired with
900 * smp_mb__before_atomic() in dquot_acquire().
901 */
902 smp_rmb();
903 #ifdef CONFIG_QUOTA_DEBUG
904 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
905 #endif
906 out:
907 if (empty)
908 do_destroy_dquot(empty);
909
910 return dquot;
911 }
912 EXPORT_SYMBOL(dqget);
913
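/* Get the inode's dquot pointer array via the filesystem's callback */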
914 static inline struct dquot **i_dquot(struct inode *inode)
915 {
916 return inode->i_sb->s_op->get_dquots(inode);
917 }
918
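/* Does the inode still need dquot pointers of the given type (-1 = any)? */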
919 static int dqinit_needed(struct inode *inode, int type)
920 {
921 struct dquot * const *dquots;
922 int cnt;
923
924 if (IS_NOQUOTA(inode))
925 return 0;
926
927 dquots = i_dquot(inode);
928 if (type != -1)
929 return !dquots[type];
930 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
931 if (!dquots[cnt])
932 return 1;
933 return 0;
934 }
935
936 /* This routine is guarded by the s_umount semaphore */
937 static void add_dquot_ref(struct super_block *sb, int type)
938 {
939 struct inode *inode, *old_inode = NULL;
940 #ifdef CONFIG_QUOTA_DEBUG
941 int reserved = 0;
942 #endif
943
944 spin_lock(&sb->s_inode_list_lock);
945 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
946 spin_lock(&inode->i_lock);
947 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
948 !atomic_read(&inode->i_writecount) ||
949 !dqinit_needed(inode, type)) {
950 spin_unlock(&inode->i_lock);
951 continue;
952 }
953 __iget(inode);
954 spin_unlock(&inode->i_lock);
955 spin_unlock(&sb->s_inode_list_lock);
956
957 #ifdef CONFIG_QUOTA_DEBUG
958 if (unlikely(inode_get_rsv_space(inode) > 0))
959 reserved = 1;
960 #endif
961 iput(old_inode);
962 __dquot_initialize(inode, type);
963
964 /*
965 * We hold a reference to 'inode' so it couldn't have been
966 * removed from s_inodes list while we dropped the
967 * s_inode_list_lock. We cannot iput the inode now as we can be
968 * holding the last reference and we cannot iput it under
969 * s_inode_list_lock. So we keep the reference and iput it
970 * later.
971 */
972 old_inode = inode;
973 spin_lock(&sb->s_inode_list_lock);
974 }
975 spin_unlock(&sb->s_inode_list_lock);
976 iput(old_inode);
977
978 #ifdef CONFIG_QUOTA_DEBUG
979 if (reserved) {
980 quota_error(sb, "Writes happened before quota was turned on "
981 "thus quota information is probably inconsistent. "
982 "Please run quotacheck(8)");
983 }
984 #endif
985 }
986
987 /*
988 * Remove references to dquots from inode and add dquot to list for freeing
989 * if we have the last reference to dquot
990 */
991 static void remove_inode_dquot_ref(struct inode *inode, int type,
992 struct list_head *tofree_head)
993 {
994 struct dquot **dquots = i_dquot(inode);
995 struct dquot *dquot = dquots[type];
996
997 if (!dquot)
998 return;
999
1000 dquots[type] = NULL;
1001 if (list_empty(&dquot->dq_free)) {
1002 /*
1003 * The inode still has reference to dquot so it can't be in the
1004 * free list
1005 */
1006 spin_lock(&dq_list_lock);
1007 list_add(&dquot->dq_free, tofree_head);
1008 spin_unlock(&dq_list_lock);
1009 } else {
1010 /*
1011 * Dquot is already in a list to put so we won't drop the last
1012 * reference here.
1013 */
1014 dqput(dquot);
1015 }
1016 }
1017
1018 /*
1019 * Free list of dquots
1020 * Dquots are removed from inodes and no new references can be got so we are
1021 * the only ones holding reference
1022 */
1023 static void put_dquot_list(struct list_head *tofree_head)
1024 {
1025 struct list_head *act_head;
1026 struct dquot *dquot;
1027
1028 act_head = tofree_head->next;
1029 while (act_head != tofree_head) {
1030 dquot = list_entry(act_head, struct dquot, dq_free);
1031 act_head = act_head->next;
1032 /* Remove dquot from the list so we won't have problems... */
1033 list_del_init(&dquot->dq_free);
1034 dqput(dquot);
1035 }
1036 }
1037
1038 static void remove_dquot_ref(struct super_block *sb, int type,
1039 struct list_head *tofree_head)
1040 {
1041 struct inode *inode;
1042 int reserved = 0;
1043
1044 spin_lock(&sb->s_inode_list_lock);
1045 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1046 /*
1047 * We have to scan also I_NEW inodes because they can already
1048 * have quota pointer initialized. Luckily, we need to touch
1049 * only quota pointers and these have separate locking
1050 * (dq_data_lock).
1051 */
1052 spin_lock(&dq_data_lock);
1053 if (!IS_NOQUOTA(inode)) {
1054 if (unlikely(inode_get_rsv_space(inode) > 0))
1055 reserved = 1;
1056 remove_inode_dquot_ref(inode, type, tofree_head);
1057 }
1058 spin_unlock(&dq_data_lock);
1059 }
1060 spin_unlock(&sb->s_inode_list_lock);
1061 #ifdef CONFIG_QUOTA_DEBUG
1062 if (reserved) {
1063 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1064 " was disabled thus quota information is probably "
1065 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1066 }
1067 #endif
1068 }
1069
1070 /* Gather all references from inodes and drop them */
1071 static void drop_dquot_ref(struct super_block *sb, int type)
1072 {
1073 LIST_HEAD(tofree_head);
1074
1075 if (sb->dq_op) {
1076 remove_dquot_ref(sb, type, &tofree_head);
1077 synchronize_srcu(&dquot_srcu);
1078 put_dquot_list(&tofree_head);
1079 }
1080 }
1081
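/* The usage adjustments below expect dq_data_lock to be held by the caller */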
1082 static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
1083 {
1084 dquot->dq_dqb.dqb_curinodes += number;
1085 }
1086
1087 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
1088 {
1089 dquot->dq_dqb.dqb_curspace += number;
1090 }
1091
1092 static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
1093 {
1094 dquot->dq_dqb.dqb_rsvspace += number;
1095 }
1096
1097 /*
1098 * Claim reserved quota space
1099 */
1100 static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
1101 {
1102 if (dquot->dq_dqb.dqb_rsvspace < number) {
1103 WARN_ON_ONCE(1);
1104 number = dquot->dq_dqb.dqb_rsvspace;
1105 }
1106 dquot->dq_dqb.dqb_curspace += number;
1107 dquot->dq_dqb.dqb_rsvspace -= number;
1108 }
1109
1110 static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
1111 {
1112 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1113 number = dquot->dq_dqb.dqb_curspace;
1114 dquot->dq_dqb.dqb_rsvspace += number;
1115 dquot->dq_dqb.dqb_curspace -= number;
1116 }
1117
1118 static inline
1119 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1120 {
1121 if (dquot->dq_dqb.dqb_rsvspace >= number)
1122 dquot->dq_dqb.dqb_rsvspace -= number;
1123 else {
1124 WARN_ON_ONCE(1);
1125 dquot->dq_dqb.dqb_rsvspace = 0;
1126 }
1127 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1128 dquot->dq_dqb.dqb_bsoftlimit)
1129 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1130 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1131 }
1132
1133 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1134 {
1135 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1136 dquot->dq_dqb.dqb_curinodes >= number)
1137 dquot->dq_dqb.dqb_curinodes -= number;
1138 else
1139 dquot->dq_dqb.dqb_curinodes = 0;
1140 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1141 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1142 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1143 }
1144
1145 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1146 {
1147 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1148 dquot->dq_dqb.dqb_curspace >= number)
1149 dquot->dq_dqb.dqb_curspace -= number;
1150 else
1151 dquot->dq_dqb.dqb_curspace = 0;
1152 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1153 dquot->dq_dqb.dqb_bsoftlimit)
1154 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1155 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1156 }
1157
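/*
 * Quota warnings are prepared under dq_data_lock and flushed only after the
 * lock is dropped, since issuing them may block (tty, netlink).
 */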
1158 struct dquot_warn {
1159 struct super_block *w_sb;
1160 struct kqid w_dq_id;
1161 short w_type;
1162 };
1163
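/*
 * Record that a block/inode warning was issued for this dquot so the same
 * class of warning is not repeated; returns the previous flag state.
 */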
1164 static int warning_issued(struct dquot *dquot, const int warntype)
1165 {
1166 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1167 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1168 ((warntype == QUOTA_NL_IHARDWARN ||
1169 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1170
1171 if (!flag)
1172 return 0;
1173 return test_and_set_bit(flag, &dquot->dq_flags);
1174 }
1175
1176 #ifdef CONFIG_PRINT_QUOTA_WARNING
1177 static int flag_print_warnings = 1;
1178
1179 static int need_print_warning(struct dquot_warn *warn)
1180 {
1181 if (!flag_print_warnings)
1182 return 0;
1183
1184 switch (warn->w_dq_id.type) {
1185 case USRQUOTA:
1186 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1187 case GRPQUOTA:
1188 return in_group_p(warn->w_dq_id.gid);
1189 case PRJQUOTA:
1190 return 1;
1191 }
1192 return 0;
1193 }
1194
1195 /* Print a warning to the user who exceeded the quota */
1196 static void print_warning(struct dquot_warn *warn)
1197 {
1198 char *msg = NULL;
1199 struct tty_struct *tty;
1200 int warntype = warn->w_type;
1201
1202 if (warntype == QUOTA_NL_IHARDBELOW ||
1203 warntype == QUOTA_NL_ISOFTBELOW ||
1204 warntype == QUOTA_NL_BHARDBELOW ||
1205 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1206 return;
1207
1208 tty = get_current_tty();
1209 if (!tty)
1210 return;
1211 tty_write_message(tty, warn->w_sb->s_id);
1212 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1213 tty_write_message(tty, ": warning, ");
1214 else
1215 tty_write_message(tty, ": write failed, ");
1216 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1217 switch (warntype) {
1218 case QUOTA_NL_IHARDWARN:
1219 msg = " file limit reached.\r\n";
1220 break;
1221 case QUOTA_NL_ISOFTLONGWARN:
1222 msg = " file quota exceeded too long.\r\n";
1223 break;
1224 case QUOTA_NL_ISOFTWARN:
1225 msg = " file quota exceeded.\r\n";
1226 break;
1227 case QUOTA_NL_BHARDWARN:
1228 msg = " block limit reached.\r\n";
1229 break;
1230 case QUOTA_NL_BSOFTLONGWARN:
1231 msg = " block quota exceeded too long.\r\n";
1232 break;
1233 case QUOTA_NL_BSOFTWARN:
1234 msg = " block quota exceeded.\r\n";
1235 break;
1236 }
1237 tty_write_message(tty, msg);
1238 tty_kref_put(tty);
1239 }
1240 #endif
1241
1242 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1243 int warntype)
1244 {
1245 if (warning_issued(dquot, warntype))
1246 return;
1247 warn->w_type = warntype;
1248 warn->w_sb = dquot->dq_sb;
1249 warn->w_dq_id = dquot->dq_id;
1250 }
1251
1252 /*
1253 * Write warnings to the console and send warning messages over netlink.
1254 *
1255 * Note that this function can call into tty and networking code.
1256 */
1257 static void flush_warnings(struct dquot_warn *warn)
1258 {
1259 int i;
1260
1261 for (i = 0; i < MAXQUOTAS; i++) {
1262 if (warn[i].w_type == QUOTA_NL_NOWARN)
1263 continue;
1264 #ifdef CONFIG_PRINT_QUOTA_WARNING
1265 print_warning(&warn[i]);
1266 #endif
1267 quota_send_warning(warn[i].w_dq_id,
1268 warn[i].w_sb->s_dev, warn[i].w_type);
1269 }
1270 }
1271
1272 static int ignore_hardlimit(struct dquot *dquot)
1273 {
1274 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1275
1276 return capable(CAP_SYS_RESOURCE) &&
1277 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1278 !(info->dqi_flags & DQF_ROOT_SQUASH));
1279 }
1280
1281 /* needs dq_data_lock */
1282 static int check_idq(struct dquot *dquot, qsize_t inodes,
1283 struct dquot_warn *warn)
1284 {
1285 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1286
1287 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1288 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1289 return 0;
1290
1291 if (dquot->dq_dqb.dqb_ihardlimit &&
1292 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1293 !ignore_hardlimit(dquot)) {
1294 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1295 return -EDQUOT;
1296 }
1297
1298 if (dquot->dq_dqb.dqb_isoftlimit &&
1299 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1300 dquot->dq_dqb.dqb_itime &&
1301 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1302 !ignore_hardlimit(dquot)) {
1303 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1304 return -EDQUOT;
1305 }
1306
1307 if (dquot->dq_dqb.dqb_isoftlimit &&
1308 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1309 dquot->dq_dqb.dqb_itime == 0) {
1310 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1311 dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1312 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1313 }
1314
1315 return 0;
1316 }
1317
1318 /* needs dq_data_lock */
1319 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
1320 struct dquot_warn *warn)
1321 {
1322 qsize_t tspace;
1323 struct super_block *sb = dquot->dq_sb;
1324
1325 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1326 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1327 return 0;
1328
1329 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1330 + space;
1331
1332 if (dquot->dq_dqb.dqb_bhardlimit &&
1333 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1334 !ignore_hardlimit(dquot)) {
1335 if (!prealloc)
1336 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1337 return -EDQUOT;
1338 }
1339
1340 if (dquot->dq_dqb.dqb_bsoftlimit &&
1341 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1342 dquot->dq_dqb.dqb_btime &&
1343 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1344 !ignore_hardlimit(dquot)) {
1345 if (!prealloc)
1346 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1347 return -EDQUOT;
1348 }
1349
1350 if (dquot->dq_dqb.dqb_bsoftlimit &&
1351 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1352 dquot->dq_dqb.dqb_btime == 0) {
1353 if (!prealloc) {
1354 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1355 dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1356 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1357 }
1358 else
1359 /*
1360 * We don't allow preallocation to exceed the softlimit so exceeding it will
1361 * always be printed
1362 */
1363 return -EDQUOT;
1364 }
1365
1366 return 0;
1367 }
1368
1369 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1370 {
1371 qsize_t newinodes;
1372
1373 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1374 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1375 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1376 return QUOTA_NL_NOWARN;
1377
1378 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1379 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1380 return QUOTA_NL_ISOFTBELOW;
1381 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1382 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1383 return QUOTA_NL_IHARDBELOW;
1384 return QUOTA_NL_NOWARN;
1385 }
1386
1387 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1388 {
1389 qsize_t tspace;
1390
1391 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1392
1393 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1394 tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1395 return QUOTA_NL_NOWARN;
1396
1397 if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1398 return QUOTA_NL_BSOFTBELOW;
1399 if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1400 tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1401 return QUOTA_NL_BHARDBELOW;
1402 return QUOTA_NL_NOWARN;
1403 }
1404
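/* Is quota accounting enabled (loaded and not suspended) for this inode? */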
1405 static int dquot_active(const struct inode *inode)
1406 {
1407 struct super_block *sb = inode->i_sb;
1408
1409 if (IS_NOQUOTA(inode))
1410 return 0;
1411 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1412 }
1413
1414 /*
1415 * Initialize quota pointers in inode
1416 *
1417 * It is better to call this function outside of any transaction as it
1418 * might need a lot of space in journal for dquot structure allocation.
1419 */
1420 static int __dquot_initialize(struct inode *inode, int type)
1421 {
1422 int cnt, init_needed = 0;
1423 struct dquot **dquots, *got[MAXQUOTAS] = {};
1424 struct super_block *sb = inode->i_sb;
1425 qsize_t rsv;
1426 int ret = 0;
1427
1428 if (!dquot_active(inode))
1429 return 0;
1430
1431 dquots = i_dquot(inode);
1432
1433 /* First get references to structures we might need. */
1434 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1435 struct kqid qid;
1436 kprojid_t projid;
1437 int rc;
1438 struct dquot *dquot;
1439
1440 if (type != -1 && cnt != type)
1441 continue;
1442 /*
1443 * The i_dquot should have been initialized in most cases,
1444 * we check it without locking here to avoid unnecessary
1445 * dqget()/dqput() calls.
1446 */
1447 if (dquots[cnt])
1448 continue;
1449
1450 if (!sb_has_quota_active(sb, cnt))
1451 continue;
1452
1453 init_needed = 1;
1454
1455 switch (cnt) {
1456 case USRQUOTA:
1457 qid = make_kqid_uid(inode->i_uid);
1458 break;
1459 case GRPQUOTA:
1460 qid = make_kqid_gid(inode->i_gid);
1461 break;
1462 case PRJQUOTA:
1463 rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1464 if (rc)
1465 continue;
1466 qid = make_kqid_projid(projid);
1467 break;
1468 }
1469 dquot = dqget(sb, qid);
1470 if (IS_ERR(dquot)) {
1471 /* We raced with somebody turning quotas off... */
1472 if (PTR_ERR(dquot) != -ESRCH) {
1473 ret = PTR_ERR(dquot);
1474 goto out_put;
1475 }
1476 dquot = NULL;
1477 }
1478 got[cnt] = dquot;
1479 }
1480
1481 /* All required i_dquot pointers have been initialized */
1482 if (!init_needed)
1483 return 0;
1484
1485 spin_lock(&dq_data_lock);
1486 if (IS_NOQUOTA(inode))
1487 goto out_lock;
1488 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1489 if (type != -1 && cnt != type)
1490 continue;
1491 /* Avoid races with quotaoff() */
1492 if (!sb_has_quota_active(sb, cnt))
1493 continue;
1494 /* We could race with quotaon or dqget() could have failed */
1495 if (!got[cnt])
1496 continue;
1497 if (!dquots[cnt]) {
1498 dquots[cnt] = got[cnt];
1499 got[cnt] = NULL;
1500 /*
1501 * Make quota reservation system happy if someone
1502 * did a write before quota was turned on
1503 */
1504 rsv = inode_get_rsv_space(inode);
1505 if (unlikely(rsv))
1506 dquot_resv_space(dquots[cnt], rsv);
1507 }
1508 }
1509 out_lock:
1510 spin_unlock(&dq_data_lock);
1511 out_put:
1512 /* Drop unused references */
1513 dqput_all(got);
1514
1515 return ret;
1516 }
1517
1518 int dquot_initialize(struct inode *inode)
1519 {
1520 return __dquot_initialize(inode, -1);
1521 }
1522 EXPORT_SYMBOL(dquot_initialize);
1523
1524 bool dquot_initialize_needed(struct inode *inode)
1525 {
1526 struct dquot **dquots;
1527 int i;
1528
1529 if (!dquot_active(inode))
1530 return false;
1531
1532 dquots = i_dquot(inode);
1533 for (i = 0; i < MAXQUOTAS; i++)
1534 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1535 return true;
1536 return false;
1537 }
1538 EXPORT_SYMBOL(dquot_initialize_needed);
1539
1540 /*
1541 * Release all quotas referenced by inode.
1542 *
1543 * This function is only called on inode free or when converting
1544 * a file to a quota file; in both cases there are no other users
1545 * of the i_dquot, so we needn't call synchronize_srcu() after
1546 * clearing i_dquot.
1547 */
1548 static void __dquot_drop(struct inode *inode)
1549 {
1550 int cnt;
1551 struct dquot **dquots = i_dquot(inode);
1552 struct dquot *put[MAXQUOTAS];
1553
1554 spin_lock(&dq_data_lock);
1555 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1556 put[cnt] = dquots[cnt];
1557 dquots[cnt] = NULL;
1558 }
1559 spin_unlock(&dq_data_lock);
1560 dqput_all(put);
1561 }
1562
1563 void dquot_drop(struct inode *inode)
1564 {
1565 struct dquot * const *dquots;
1566 int cnt;
1567
1568 if (IS_NOQUOTA(inode))
1569 return;
1570
1571 /*
1572 * Test before calling to rule out calls from proc and such
1573 * where we are not allowed to block. Note that this is
1574 * actually a reliable test even without the lock - the caller
1575 * must ensure that nobody can come after the DQUOT_DROP and
1576 * add quota pointers back anyway.
1577 */
1578 dquots = i_dquot(inode);
1579 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1580 if (dquots[cnt])
1581 break;
1582 }
1583
1584 if (cnt < MAXQUOTAS)
1585 __dquot_drop(inode);
1586 }
1587 EXPORT_SYMBOL(dquot_drop);
1588
1589 /*
1590 * inode_reserved_space is managed internally by quota, and protected by
1591 * i_lock similar to i_blocks+i_bytes.
1592 */
1593 static qsize_t *inode_reserved_space(struct inode * inode)
1594 {
1595 /* Filesystem must explicitly define its own method in order to use
1596 * the quota reservation interface */
1597 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1598 return inode->i_sb->dq_op->get_reserved_space(inode);
1599 }
1600
1601 void inode_add_rsv_space(struct inode *inode, qsize_t number)
1602 {
1603 spin_lock(&inode->i_lock);
1604 *inode_reserved_space(inode) += number;
1605 spin_unlock(&inode->i_lock);
1606 }
1607 EXPORT_SYMBOL(inode_add_rsv_space);
1608
1609 void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1610 {
1611 spin_lock(&inode->i_lock);
1612 *inode_reserved_space(inode) -= number;
1613 __inode_add_bytes(inode, number);
1614 spin_unlock(&inode->i_lock);
1615 }
1616 EXPORT_SYMBOL(inode_claim_rsv_space);
1617
1618 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
1619 {
1620 spin_lock(&inode->i_lock);
1621 *inode_reserved_space(inode) += number;
1622 __inode_sub_bytes(inode, number);
1623 spin_unlock(&inode->i_lock);
1624 }
1625 EXPORT_SYMBOL(inode_reclaim_rsv_space);
1626
1627 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
1628 {
1629 spin_lock(&inode->i_lock);
1630 *inode_reserved_space(inode) -= number;
1631 spin_unlock(&inode->i_lock);
1632 }
1633 EXPORT_SYMBOL(inode_sub_rsv_space);
1634
1635 static qsize_t inode_get_rsv_space(struct inode *inode)
1636 {
1637 qsize_t ret;
1638
1639 if (!inode->i_sb->dq_op->get_reserved_space)
1640 return 0;
1641 spin_lock(&inode->i_lock);
1642 ret = *inode_reserved_space(inode);
1643 spin_unlock(&inode->i_lock);
1644 return ret;
1645 }
1646
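/* Add bytes to the inode, either as a reservation or as real usage */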
1647 static void inode_incr_space(struct inode *inode, qsize_t number,
1648 int reserve)
1649 {
1650 if (reserve)
1651 inode_add_rsv_space(inode, number);
1652 else
1653 inode_add_bytes(inode, number);
1654 }
1655
1656 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1657 {
1658 if (reserve)
1659 inode_sub_rsv_space(inode, number);
1660 else
1661 inode_sub_bytes(inode, number);
1662 }
1663
1664 /*
1665 * This function updates the i_blocks+i_bytes fields and quota information
1666 * (together with appropriate checks).
1667 *
1668 * NOTE: We absolutely rely on the fact that the caller dirties the inode
1669 * (usually helpers in quotaops.h care about this) and holds a handle for
1670 * the current transaction so that dquot write and inode write go into the
1671 * same transaction.
1672 */
1673
1674 /*
1675 * This operation can block, but only after everything is updated
1676 */
1677 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1678 {
1679 int cnt, ret = 0, index;
1680 struct dquot_warn warn[MAXQUOTAS];
1681 int reserve = flags & DQUOT_SPACE_RESERVE;
1682 struct dquot **dquots;
1683
1684 if (!dquot_active(inode)) {
1685 inode_incr_space(inode, number, reserve);
1686 goto out;
1687 }
1688
1689 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1690 warn[cnt].w_type = QUOTA_NL_NOWARN;
1691
1692 dquots = i_dquot(inode);
1693 index = srcu_read_lock(&dquot_srcu);
1694 spin_lock(&dq_data_lock);
1695 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1696 if (!dquots[cnt])
1697 continue;
1698 ret = check_bdq(dquots[cnt], number,
1699 !(flags & DQUOT_SPACE_WARN), &warn[cnt]);
1700 if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
1701 spin_unlock(&dq_data_lock);
1702 goto out_flush_warn;
1703 }
1704 }
1705 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1706 if (!dquots[cnt])
1707 continue;
1708 if (reserve)
1709 dquot_resv_space(dquots[cnt], number);
1710 else
1711 dquot_incr_space(dquots[cnt], number);
1712 }
1713 inode_incr_space(inode, number, reserve);
1714 spin_unlock(&dq_data_lock);
1715
1716 if (reserve)
1717 goto out_flush_warn;
1718 mark_all_dquot_dirty(dquots);
1719 out_flush_warn:
1720 srcu_read_unlock(&dquot_srcu, index);
1721 flush_warnings(warn);
1722 out:
1723 return ret;
1724 }
1725 EXPORT_SYMBOL(__dquot_alloc_space);
1726
1727 /*
1728 * This operation can block, but only after everything is updated
1729 */
1730 int dquot_alloc_inode(struct inode *inode)
1731 {
1732 int cnt, ret = 0, index;
1733 struct dquot_warn warn[MAXQUOTAS];
1734 struct dquot * const *dquots;
1735
1736 if (!dquot_active(inode))
1737 return 0;
1738 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1739 warn[cnt].w_type = QUOTA_NL_NOWARN;
1740
1741 dquots = i_dquot(inode);
1742 index = srcu_read_lock(&dquot_srcu);
1743 spin_lock(&dq_data_lock);
1744 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1745 if (!dquots[cnt])
1746 continue;
1747 ret = check_idq(dquots[cnt], 1, &warn[cnt]);
1748 if (ret)
1749 goto warn_put_all;
1750 }
1751
1752 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1753 if (!dquots[cnt])
1754 continue;
1755 dquot_incr_inodes(dquots[cnt], 1);
1756 }
1757
1758 warn_put_all:
1759 spin_unlock(&dq_data_lock);
1760 if (ret == 0)
1761 mark_all_dquot_dirty(dquots);
1762 srcu_read_unlock(&dquot_srcu, index);
1763 flush_warnings(warn);
1764 return ret;
1765 }
1766 EXPORT_SYMBOL(dquot_alloc_inode);
1767
1768 /*
1769 * Convert in-memory reserved quotas to real consumed quotas
1770 */
1771 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1772 {
1773 struct dquot **dquots;
1774 int cnt, index;
1775
1776 if (!dquot_active(inode)) {
1777 inode_claim_rsv_space(inode, number);
1778 return 0;
1779 }
1780
1781 dquots = i_dquot(inode);
1782 index = srcu_read_lock(&dquot_srcu);
1783 spin_lock(&dq_data_lock);
1784 /* Claim reserved quotas to allocated quotas */
1785 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1786 if (dquots[cnt])
1787 dquot_claim_reserved_space(dquots[cnt], number);
1788 }
1789 /* Update inode bytes */
1790 inode_claim_rsv_space(inode, number);
1791 spin_unlock(&dq_data_lock);
1792 mark_all_dquot_dirty(dquots);
1793 srcu_read_unlock(&dquot_srcu, index);
1794 return 0;
1795 }
1796 EXPORT_SYMBOL(dquot_claim_space_nodirty);
1797
1798 /*
1799 * Convert allocated space back to in-memory reserved quotas
1800 */
1801 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1802 {
1803 struct dquot **dquots;
1804 int cnt, index;
1805
1806 if (!dquot_active(inode)) {
1807 inode_reclaim_rsv_space(inode, number);
1808 return;
1809 }
1810
1811 dquots = i_dquot(inode);
1812 index = srcu_read_lock(&dquot_srcu);
1813 spin_lock(&dq_data_lock);
1814 /* Return allocated quotas back to in-memory reserved quotas */
1815 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1816 if (dquots[cnt])
1817 dquot_reclaim_reserved_space(dquots[cnt], number);
1818 }
1819 /* Update inode bytes */
1820 inode_reclaim_rsv_space(inode, number);
1821 spin_unlock(&dq_data_lock);
1822 mark_all_dquot_dirty(dquots);
1823 srcu_read_unlock(&dquot_srcu, index);
1824 return;
1825 }
1826 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1827
1828 /*
1829 * This operation can block, but only after everything is updated
1830 */
1831 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1832 {
1833 unsigned int cnt;
1834 struct dquot_warn warn[MAXQUOTAS];
1835 struct dquot **dquots;
1836 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1837
1838 if (!dquot_active(inode)) {
1839 inode_decr_space(inode, number, reserve);
1840 return;
1841 }
1842
1843 dquots = i_dquot(inode);
1844 index = srcu_read_lock(&dquot_srcu);
1845 spin_lock(&dq_data_lock);
1846 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1847 int wtype;
1848
1849 warn[cnt].w_type = QUOTA_NL_NOWARN;
1850 if (!dquots[cnt])
1851 continue;
1852 wtype = info_bdq_free(dquots[cnt], number);
1853 if (wtype != QUOTA_NL_NOWARN)
1854 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1855 if (reserve)
1856 dquot_free_reserved_space(dquots[cnt], number);
1857 else
1858 dquot_decr_space(dquots[cnt], number);
1859 }
1860 inode_decr_space(inode, number, reserve);
1861 spin_unlock(&dq_data_lock);
1862
1863 if (reserve)
1864 goto out_unlock;
1865 mark_all_dquot_dirty(dquots);
1866 out_unlock:
1867 srcu_read_unlock(&dquot_srcu, index);
1868 flush_warnings(warn);
1869 }
1870 EXPORT_SYMBOL(__dquot_free_space);
1871
1872 /*
1873 * This operation can block, but only after everything is updated
1874 */
1875 void dquot_free_inode(struct inode *inode)
1876 {
1877 unsigned int cnt;
1878 struct dquot_warn warn[MAXQUOTAS];
1879 struct dquot * const *dquots;
1880 int index;
1881
1882 if (!dquot_active(inode))
1883 return;
1884
1885 dquots = i_dquot(inode);
1886 index = srcu_read_lock(&dquot_srcu);
1887 spin_lock(&dq_data_lock);
1888 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1889 int wtype;
1890
1891 warn[cnt].w_type = QUOTA_NL_NOWARN;
1892 if (!dquots[cnt])
1893 continue;
1894 wtype = info_idq_free(dquots[cnt], 1);
1895 if (wtype != QUOTA_NL_NOWARN)
1896 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1897 dquot_decr_inodes(dquots[cnt], 1);
1898 }
1899 spin_unlock(&dq_data_lock);
1900 mark_all_dquot_dirty(dquots);
1901 srcu_read_unlock(&dquot_srcu, index);
1902 flush_warnings(warn);
1903 }
1904 EXPORT_SYMBOL(dquot_free_inode);
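/*
 * Note: this is typically called from a filesystem's inode-freeing path,
 * paired with dropping the dquot references, e.g. (illustrative only,
 * myfs_* names hypothetical):
 *
 *	static void myfs_free_inode(struct inode *inode)
 *	{
 *		dquot_free_inode(inode);
 *		dquot_drop(inode);
 *		... filesystem-specific bitmap/metadata updates ...
 *	}
 */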
1905
1906 /*
1907  * Transfer the number of inodes and blocks from one diskquota to another.
1908 * On success, dquot references in transfer_to are consumed and references
1909 * to original dquots that need to be released are placed there. On failure,
1910 * references are kept untouched.
1911 *
1912 * This operation can block, but only after everything is updated
1913 * A transaction must be started when entering this function.
1914 *
1915 * We are holding reference on transfer_from & transfer_to, no need to
1916 * protect them by srcu_read_lock().
1917 */
1918 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1919 {
1920 qsize_t space, cur_space;
1921 qsize_t rsv_space = 0;
1922 qsize_t inode_usage = 1;
1923 struct dquot *transfer_from[MAXQUOTAS] = {};
1924 int cnt, ret = 0;
1925 char is_valid[MAXQUOTAS] = {};
1926 struct dquot_warn warn_to[MAXQUOTAS];
1927 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1928 struct dquot_warn warn_from_space[MAXQUOTAS];
1929
1930 if (IS_NOQUOTA(inode))
1931 return 0;
1932
1933 if (inode->i_sb->dq_op->get_inode_usage) {
1934 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
1935 if (ret)
1936 return ret;
1937 }
1938
1939 /* Initialize the arrays */
1940 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1941 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1942 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1943 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1944 }
1945
1946 spin_lock(&dq_data_lock);
1947 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1948 spin_unlock(&dq_data_lock);
1949 return 0;
1950 }
1951 cur_space = inode_get_bytes(inode);
1952 rsv_space = inode_get_rsv_space(inode);
1953 space = cur_space + rsv_space;
1954 /* Build the transfer_from list and check the limits */
1955 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1956 /*
1957 		 * Skip changes for the same uid or gid, or for a quota type that is turned off.
1958 */
1959 if (!transfer_to[cnt])
1960 continue;
1961 /* Avoid races with quotaoff() */
1962 if (!sb_has_quota_active(inode->i_sb, cnt))
1963 continue;
1964 is_valid[cnt] = 1;
1965 transfer_from[cnt] = i_dquot(inode)[cnt];
1966 ret = check_idq(transfer_to[cnt], inode_usage, &warn_to[cnt]);
1967 if (ret)
1968 goto over_quota;
1969 ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
1970 if (ret)
1971 goto over_quota;
1972 }
1973
1974 /*
1975 * Finally perform the needed transfer from transfer_from to transfer_to
1976 */
1977 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1978 if (!is_valid[cnt])
1979 continue;
1980 		/* Due to an IO error we might not have the transfer_from[] structure */
1981 if (transfer_from[cnt]) {
1982 int wtype;
1983 wtype = info_idq_free(transfer_from[cnt], inode_usage);
1984 if (wtype != QUOTA_NL_NOWARN)
1985 prepare_warning(&warn_from_inodes[cnt],
1986 transfer_from[cnt], wtype);
1987 wtype = info_bdq_free(transfer_from[cnt], space);
1988 if (wtype != QUOTA_NL_NOWARN)
1989 prepare_warning(&warn_from_space[cnt],
1990 transfer_from[cnt], wtype);
1991 dquot_decr_inodes(transfer_from[cnt], inode_usage);
1992 dquot_decr_space(transfer_from[cnt], cur_space);
1993 dquot_free_reserved_space(transfer_from[cnt],
1994 rsv_space);
1995 }
1996
1997 dquot_incr_inodes(transfer_to[cnt], inode_usage);
1998 dquot_incr_space(transfer_to[cnt], cur_space);
1999 dquot_resv_space(transfer_to[cnt], rsv_space);
2000
2001 i_dquot(inode)[cnt] = transfer_to[cnt];
2002 }
2003 spin_unlock(&dq_data_lock);
2004
2005 mark_all_dquot_dirty(transfer_from);
2006 mark_all_dquot_dirty(transfer_to);
2007 flush_warnings(warn_to);
2008 flush_warnings(warn_from_inodes);
2009 flush_warnings(warn_from_space);
2010 /* Pass back references to put */
2011 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2012 if (is_valid[cnt])
2013 transfer_to[cnt] = transfer_from[cnt];
2014 return 0;
2015 over_quota:
2016 spin_unlock(&dq_data_lock);
2017 flush_warnings(warn_to);
2018 return ret;
2019 }
2020 EXPORT_SYMBOL(__dquot_transfer);
2021
2022 /* Wrapper for transferring ownership of an inode for uid/gid only
2023 * Called from FSXXX_setattr()
2024 */
2025 int dquot_transfer(struct inode *inode, struct iattr *iattr)
2026 {
2027 struct dquot *transfer_to[MAXQUOTAS] = {};
2028 struct dquot *dquot;
2029 struct super_block *sb = inode->i_sb;
2030 int ret;
2031
2032 if (!dquot_active(inode))
2033 return 0;
2034
2035 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
2036 dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
2037 if (IS_ERR(dquot)) {
2038 if (PTR_ERR(dquot) != -ESRCH) {
2039 ret = PTR_ERR(dquot);
2040 goto out_put;
2041 }
2042 dquot = NULL;
2043 }
2044 transfer_to[USRQUOTA] = dquot;
2045 }
2046 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
2047 dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
2048 if (IS_ERR(dquot)) {
2049 if (PTR_ERR(dquot) != -ESRCH) {
2050 ret = PTR_ERR(dquot);
2051 goto out_put;
2052 }
2053 dquot = NULL;
2054 }
2055 transfer_to[GRPQUOTA] = dquot;
2056 }
2057 ret = __dquot_transfer(inode, transfer_to);
2058 out_put:
2059 dqput_all(transfer_to);
2060 return ret;
2061 }
2062 EXPORT_SYMBOL(dquot_transfer);
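/*
 * Note: a typical call site is a filesystem's ->setattr method, e.g.
 * (illustrative sketch; myfs_setattr is hypothetical):
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *iattr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		if (is_quota_modification(inode, iattr)) {
 *			error = dquot_initialize(inode);
 *			if (error)
 *				return error;
 *		}
 *		if ((iattr->ia_valid & ATTR_UID &&
 *		     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 *		    (iattr->ia_valid & ATTR_GID &&
 *		     !gid_eq(iattr->ia_gid, inode->i_gid))) {
 *			error = dquot_transfer(inode, iattr);
 *			if (error)
 *				return error;
 *		}
 *		...
 *	}
 */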
2063
2064 /*
2065 * Write info of quota file to disk
2066 */
2067 int dquot_commit_info(struct super_block *sb, int type)
2068 {
2069 int ret;
2070 struct quota_info *dqopt = sb_dqopt(sb);
2071
2072 mutex_lock(&dqopt->dqio_mutex);
2073 ret = dqopt->ops[type]->write_file_info(sb, type);
2074 mutex_unlock(&dqopt->dqio_mutex);
2075 return ret;
2076 }
2077 EXPORT_SYMBOL(dquot_commit_info);
2078
2079 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2080 {
2081 struct quota_info *dqopt = sb_dqopt(sb);
2082 int err;
2083
2084 if (!sb_has_quota_active(sb, qid->type))
2085 return -ESRCH;
2086 if (!dqopt->ops[qid->type]->get_next_id)
2087 return -ENOSYS;
2088 mutex_lock(&dqopt->dqio_mutex);
2089 err = dqopt->ops[qid->type]->get_next_id(sb, qid);
2090 mutex_unlock(&dqopt->dqio_mutex);
2091 return err;
2092 }
2093 EXPORT_SYMBOL(dquot_get_next_id);
2094
2095 /*
2096 * Definitions of diskquota operations.
2097 */
2098 const struct dquot_operations dquot_operations = {
2099 .write_dquot = dquot_commit,
2100 .acquire_dquot = dquot_acquire,
2101 .release_dquot = dquot_release,
2102 .mark_dirty = dquot_mark_dquot_dirty,
2103 .write_info = dquot_commit_info,
2104 .alloc_dquot = dquot_alloc,
2105 .destroy_dquot = dquot_destroy,
2106 .get_next_id = dquot_get_next_id,
2107 };
2108 EXPORT_SYMBOL(dquot_operations);
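/*
 * Note: a filesystem opts into these generic operations by pointing its
 * superblock at them during mount, e.g. (illustrative):
 *
 *	sb->dq_op = &dquot_operations;
 *
 * together with an appropriate quotactl_ops table in sb->s_qcop (such as
 * dquot_quotactl_sysfile_ops defined later in this file).
 */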
2109
2110 /*
2111 * Generic helper for ->open on filesystems supporting disk quotas.
2112 */
2113 int dquot_file_open(struct inode *inode, struct file *file)
2114 {
2115 int error;
2116
2117 error = generic_file_open(inode, file);
2118 if (!error && (file->f_mode & FMODE_WRITE))
2119 dquot_initialize(inode);
2120 return error;
2121 }
2122 EXPORT_SYMBOL(dquot_file_open);
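/*
 * Note: filesystems typically wire this helper in as the ->open method of
 * their regular-file file_operations, e.g. (illustrative, myfs_* names
 * hypothetical):
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.open = dquot_file_open,
 *	};
 */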
2123
2124 /*
2125 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2126 */
2127 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2128 {
2129 int cnt, ret = 0;
2130 struct quota_info *dqopt = sb_dqopt(sb);
2131 struct inode *toputinode[MAXQUOTAS];
2132
2133 /* s_umount should be held in exclusive mode */
2134 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2135 up_read(&sb->s_umount);
2136
2137 	/* We cannot turn off usage accounting without turning off limits, nor
2138 	 * suspend quotas while simultaneously turning them off. */
2139 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2140 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2141 DQUOT_USAGE_ENABLED)))
2142 return -EINVAL;
2143
2144 /*
2145 * Skip everything if there's nothing to do. We have to do this because
2146 * sometimes we are called when fill_super() failed and calling
2147 * sync_fs() in such cases does no good.
2148 */
2149 if (!sb_any_quota_loaded(sb))
2150 return 0;
2151
2152 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2153 toputinode[cnt] = NULL;
2154 if (type != -1 && cnt != type)
2155 continue;
2156 if (!sb_has_quota_loaded(sb, cnt))
2157 continue;
2158
2159 if (flags & DQUOT_SUSPENDED) {
2160 spin_lock(&dq_state_lock);
2161 dqopt->flags |=
2162 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2163 spin_unlock(&dq_state_lock);
2164 } else {
2165 spin_lock(&dq_state_lock);
2166 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2167 /* Turning off suspended quotas? */
2168 if (!sb_has_quota_loaded(sb, cnt) &&
2169 sb_has_quota_suspended(sb, cnt)) {
2170 dqopt->flags &= ~dquot_state_flag(
2171 DQUOT_SUSPENDED, cnt);
2172 spin_unlock(&dq_state_lock);
2173 iput(dqopt->files[cnt]);
2174 dqopt->files[cnt] = NULL;
2175 continue;
2176 }
2177 spin_unlock(&dq_state_lock);
2178 }
2179
2180 		/* Do we still have to keep quota loaded? */
2181 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2182 continue;
2183
2184 /* Note: these are blocking operations */
2185 drop_dquot_ref(sb, cnt);
2186 invalidate_dquots(sb, cnt);
2187 /*
2188 		 * Now all dquots should be invalidated and all writes done, so we
2189 		 * should be the only users of the info. No locks are needed.
2190 */
2191 if (info_dirty(&dqopt->info[cnt]))
2192 sb->dq_op->write_info(sb, cnt);
2193 if (dqopt->ops[cnt]->free_file_info)
2194 dqopt->ops[cnt]->free_file_info(sb, cnt);
2195 put_quota_format(dqopt->info[cnt].dqi_format);
2196
2197 toputinode[cnt] = dqopt->files[cnt];
2198 if (!sb_has_quota_loaded(sb, cnt))
2199 dqopt->files[cnt] = NULL;
2200 dqopt->info[cnt].dqi_flags = 0;
2201 dqopt->info[cnt].dqi_igrace = 0;
2202 dqopt->info[cnt].dqi_bgrace = 0;
2203 dqopt->ops[cnt] = NULL;
2204 }
2205
2206 /* Skip syncing and setting flags if quota files are hidden */
2207 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2208 goto put_inodes;
2209
2210 /* Sync the superblock so that buffers with quota data are written to
2211 * disk (and so userspace sees correct data afterwards). */
2212 if (sb->s_op->sync_fs)
2213 sb->s_op->sync_fs(sb, 1);
2214 sync_blockdev(sb->s_bdev);
2215 /* Now the quota files are just ordinary files and we can set the
2216 * inode flags back. Moreover we discard the pagecache so that
2217 * userspace sees the writes we did bypassing the pagecache. We
2218 * must also discard the blockdev buffers so that we see the
2219 	 * changes done by userspace on the next quotaon(). */
2220 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2221 /* This can happen when suspending quotas on remount-ro... */
2222 if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
2223 inode_lock(toputinode[cnt]);
2224 toputinode[cnt]->i_flags &= ~S_NOQUOTA;
2225 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
2226 inode_unlock(toputinode[cnt]);
2227 mark_inode_dirty_sync(toputinode[cnt]);
2228 }
2229 if (sb->s_bdev)
2230 invalidate_bdev(sb->s_bdev);
2231 put_inodes:
2232 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2233 if (toputinode[cnt]) {
2234 /* On remount RO, we keep the inode pointer so that we
2235 * can reenable quota on the subsequent remount RW. We
2236 			 * have to check the 'flags' variable and not use the sb_has_*
2237 			 * functions because another quotaon / quotaoff could
2238 * change global state before we got here. We refuse
2239 * to suspend quotas when there is pending delete on
2240 * the quota file... */
2241 if (!(flags & DQUOT_SUSPENDED))
2242 iput(toputinode[cnt]);
2243 else if (!toputinode[cnt]->i_nlink)
2244 ret = -EBUSY;
2245 }
2246 return ret;
2247 }
2248 EXPORT_SYMBOL(dquot_disable);
2249
2250 int dquot_quota_off(struct super_block *sb, int type)
2251 {
2252 return dquot_disable(sb, type,
2253 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2254 }
2255 EXPORT_SYMBOL(dquot_quota_off);
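/*
 * Note: this is the stock ->quota_off implementation; a quotactl_ops table
 * referencing the generic routines would look roughly like (illustrative
 * sketch):
 *
 *	const struct quotactl_ops myfs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		...
 *	};
 */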
2256
2257 /*
2258  * Turn quotas on for a device
2259 */
2260
2261 /*
2262 * Helper function to turn quotas on when we already have the inode of
2263 * quota file and no quota information is loaded.
2264 */
2265 static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2266 unsigned int flags)
2267 {
2268 struct quota_format_type *fmt = find_quota_format(format_id);
2269 struct super_block *sb = inode->i_sb;
2270 struct quota_info *dqopt = sb_dqopt(sb);
2271 int error;
2272
2273 if (!fmt)
2274 return -ESRCH;
2275 if (!S_ISREG(inode->i_mode)) {
2276 error = -EACCES;
2277 goto out_fmt;
2278 }
2279 if (IS_RDONLY(inode)) {
2280 error = -EROFS;
2281 goto out_fmt;
2282 }
2283 if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
2284 (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
2285 error = -EINVAL;
2286 goto out_fmt;
2287 }
2288 /* Filesystems outside of init_user_ns not yet supported */
2289 if (sb->s_user_ns != &init_user_ns) {
2290 error = -EINVAL;
2291 goto out_fmt;
2292 }
2293 /* Usage always has to be set... */
2294 if (!(flags & DQUOT_USAGE_ENABLED)) {
2295 error = -EINVAL;
2296 goto out_fmt;
2297 }
2298 if (sb_has_quota_loaded(sb, type)) {
2299 error = -EBUSY;
2300 goto out_fmt;
2301 }
2302
2303 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2304 /* As we bypass the pagecache we must now flush all the
2305 		 * dirty data and invalidate caches so that the kernel sees
2306 * changes from userspace. It is not enough to just flush
2307 * the quota file since if blocksize < pagesize, invalidation
2308 * of the cache could fail because of other unrelated dirty
2309 		 * data. */
2310 sync_filesystem(sb);
2311 invalidate_bdev(sb->s_bdev);
2312 }
2313
2314 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2315 /* We don't want quota and atime on quota files (deadlocks
2316 		 * possible). Also nobody should write to the file - we use
2317 * special IO operations which ignore the immutable bit. */
2318 inode_lock(inode);
2319 inode->i_flags |= S_NOQUOTA;
2320 inode_unlock(inode);
2321 /*
2322 * When S_NOQUOTA is set, remove dquot references as no more
2323 * references can be added
2324 */
2325 __dquot_drop(inode);
2326 }
2327
2328 error = -EIO;
2329 dqopt->files[type] = igrab(inode);
2330 if (!dqopt->files[type])
2331 goto out_file_flags;
2332 error = -EINVAL;
2333 if (!fmt->qf_ops->check_quota_file(sb, type))
2334 goto out_file_init;
2335
2336 dqopt->ops[type] = fmt->qf_ops;
2337 dqopt->info[type].dqi_format = fmt;
2338 dqopt->info[type].dqi_fmt_id = format_id;
2339 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2340 mutex_lock(&dqopt->dqio_mutex);
2341 error = dqopt->ops[type]->read_file_info(sb, type);
2342 if (error < 0) {
2343 mutex_unlock(&dqopt->dqio_mutex);
2344 goto out_file_init;
2345 }
2346 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2347 dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2348 mutex_unlock(&dqopt->dqio_mutex);
2349 spin_lock(&dq_state_lock);
2350 dqopt->flags |= dquot_state_flag(flags, type);
2351 spin_unlock(&dq_state_lock);
2352
2353 add_dquot_ref(sb, type);
2354
2355 return 0;
2356
2357 out_file_init:
2358 dqopt->files[type] = NULL;
2359 iput(inode);
2360 out_file_flags:
2361 inode_lock(inode);
2362 inode->i_flags &= ~S_NOQUOTA;
2363 inode_unlock(inode);
2364 out_fmt:
2365 put_quota_format(fmt);
2366
2367 return error;
2368 }
2369
2370 /* Reenable quotas on remount RW */
2371 int dquot_resume(struct super_block *sb, int type)
2372 {
2373 struct quota_info *dqopt = sb_dqopt(sb);
2374 struct inode *inode;
2375 int ret = 0, cnt;
2376 unsigned int flags;
2377
2378 /* s_umount should be held in exclusive mode */
2379 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2380 up_read(&sb->s_umount);
2381
2382 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2383 if (type != -1 && cnt != type)
2384 continue;
2385 if (!sb_has_quota_suspended(sb, cnt))
2386 continue;
2387
2388 inode = dqopt->files[cnt];
2389 dqopt->files[cnt] = NULL;
2390 spin_lock(&dq_state_lock);
2391 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2392 DQUOT_LIMITS_ENABLED,
2393 cnt);
2394 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2395 spin_unlock(&dq_state_lock);
2396
2397 flags = dquot_generic_flag(flags, cnt);
2398 ret = vfs_load_quota_inode(inode, cnt,
2399 dqopt->info[cnt].dqi_fmt_id, flags);
2400 iput(inode);
2401 }
2402
2403 return ret;
2404 }
2405 EXPORT_SYMBOL(dquot_resume);
2406
2407 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2408 const struct path *path)
2409 {
2410 int error = security_quota_on(path->dentry);
2411 if (error)
2412 return error;
2413 /* Quota file not on the same filesystem? */
2414 if (path->dentry->d_sb != sb)
2415 error = -EXDEV;
2416 else
2417 error = vfs_load_quota_inode(d_inode(path->dentry), type,
2418 format_id, DQUOT_USAGE_ENABLED |
2419 DQUOT_LIMITS_ENABLED);
2420 return error;
2421 }
2422 EXPORT_SYMBOL(dquot_quota_on);
2423
2424 /*
2425 * More powerful function for turning on quotas allowing setting
2426 * of individual quota flags
2427 */
2428 int dquot_enable(struct inode *inode, int type, int format_id,
2429 unsigned int flags)
2430 {
2431 struct super_block *sb = inode->i_sb;
2432
2433 /* Just unsuspend quotas? */
2434 BUG_ON(flags & DQUOT_SUSPENDED);
2435 /* s_umount should be held in exclusive mode */
2436 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2437 up_read(&sb->s_umount);
2438
2439 if (!flags)
2440 return 0;
2441 /* Just updating flags needed? */
2442 if (sb_has_quota_loaded(sb, type)) {
2443 if (flags & DQUOT_USAGE_ENABLED &&
2444 sb_has_quota_usage_enabled(sb, type))
2445 return -EBUSY;
2446 if (flags & DQUOT_LIMITS_ENABLED &&
2447 sb_has_quota_limits_enabled(sb, type))
2448 return -EBUSY;
2449 spin_lock(&dq_state_lock);
2450 sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
2451 spin_unlock(&dq_state_lock);
2452 return 0;
2453 }
2454
2455 return vfs_load_quota_inode(inode, type, format_id, flags);
2456 }
2457 EXPORT_SYMBOL(dquot_enable);
2458
2459 /*
2460  * This function is used when a filesystem needs to initialize quotas
2461  * at mount time.
2462 */
2463 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2464 int format_id, int type)
2465 {
2466 struct dentry *dentry;
2467 int error;
2468
2469 dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
2470 if (IS_ERR(dentry))
2471 return PTR_ERR(dentry);
2472
2473 if (d_really_is_negative(dentry)) {
2474 error = -ENOENT;
2475 goto out;
2476 }
2477
2478 error = security_quota_on(dentry);
2479 if (!error)
2480 error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
2481 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2482
2483 out:
2484 dput(dentry);
2485 return error;
2486 }
2487 EXPORT_SYMBOL(dquot_quota_on_mount);
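/*
 * Note: typically invoked from a filesystem's mount path when a quota file
 * name was supplied as a mount option, e.g. (illustrative; sbi->s_qf_names
 * and sbi->s_jquota_fmt are hypothetical mount-option state):
 *
 *	err = dquot_quota_on_mount(sb, sbi->s_qf_names[type],
 *				   sbi->s_jquota_fmt, type);
 */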
2488
2489 static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
2490 {
2491 int ret;
2492 int type;
2493 struct quota_info *dqopt = sb_dqopt(sb);
2494
2495 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2496 return -ENOSYS;
2497 /* Accounting cannot be turned on while fs is mounted */
2498 flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
2499 if (!flags)
2500 return -EINVAL;
2501 for (type = 0; type < MAXQUOTAS; type++) {
2502 if (!(flags & qtype_enforce_flag(type)))
2503 continue;
2504 /* Can't enforce without accounting */
2505 if (!sb_has_quota_usage_enabled(sb, type))
2506 return -EINVAL;
2507 ret = dquot_enable(dqopt->files[type], type,
2508 dqopt->info[type].dqi_fmt_id,
2509 DQUOT_LIMITS_ENABLED);
2510 if (ret < 0)
2511 goto out_err;
2512 }
2513 return 0;
2514 out_err:
2515 	/* Back out the enforcement enablement we already did */
2516 for (type--; type >= 0; type--) {
2517 if (flags & qtype_enforce_flag(type))
2518 dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2519 }
2520 /* Error code translation for better compatibility with XFS */
2521 if (ret == -EBUSY)
2522 ret = -EEXIST;
2523 return ret;
2524 }
2525
2526 static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
2527 {
2528 int ret;
2529 int type;
2530 struct quota_info *dqopt = sb_dqopt(sb);
2531
2532 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2533 return -ENOSYS;
2534 /*
2535 * We don't support turning off accounting via quotactl. In principle
2536 	 * the quota infrastructure can do this, but filesystems don't expect
2537 * userspace to be able to do it.
2538 */
2539 if (flags &
2540 (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
2541 return -EOPNOTSUPP;
2542
2543 /* Filter out limits not enabled */
2544 for (type = 0; type < MAXQUOTAS; type++)
2545 if (!sb_has_quota_limits_enabled(sb, type))
2546 flags &= ~qtype_enforce_flag(type);
2547 /* Nothing left? */
2548 if (!flags)
2549 return -EEXIST;
2550 for (type = 0; type < MAXQUOTAS; type++) {
2551 if (flags & qtype_enforce_flag(type)) {
2552 ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2553 if (ret < 0)
2554 goto out_err;
2555 }
2556 }
2557 return 0;
2558 out_err:
2559 	/* Back out the enforcement disabling we already did */
2560 for (type--; type >= 0; type--) {
2561 if (flags & qtype_enforce_flag(type))
2562 dquot_enable(dqopt->files[type], type,
2563 dqopt->info[type].dqi_fmt_id,
2564 DQUOT_LIMITS_ENABLED);
2565 }
2566 return ret;
2567 }
2568
2569 /* Generic routine for getting common part of quota structure */
2570 static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2571 {
2572 struct mem_dqblk *dm = &dquot->dq_dqb;
2573
2574 memset(di, 0, sizeof(*di));
2575 spin_lock(&dq_data_lock);
2576 di->d_spc_hardlimit = dm->dqb_bhardlimit;
2577 di->d_spc_softlimit = dm->dqb_bsoftlimit;
2578 di->d_ino_hardlimit = dm->dqb_ihardlimit;
2579 di->d_ino_softlimit = dm->dqb_isoftlimit;
2580 di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2581 di->d_ino_count = dm->dqb_curinodes;
2582 di->d_spc_timer = dm->dqb_btime;
2583 di->d_ino_timer = dm->dqb_itime;
2584 spin_unlock(&dq_data_lock);
2585 }
2586
2587 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2588 struct qc_dqblk *di)
2589 {
2590 struct dquot *dquot;
2591
2592 dquot = dqget(sb, qid);
2593 if (IS_ERR(dquot))
2594 return PTR_ERR(dquot);
2595 do_get_dqblk(dquot, di);
2596 dqput(dquot);
2597
2598 return 0;
2599 }
2600 EXPORT_SYMBOL(dquot_get_dqblk);
2601
2602 int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2603 struct qc_dqblk *di)
2604 {
2605 struct dquot *dquot;
2606 int err;
2607
2608 if (!sb->dq_op->get_next_id)
2609 return -ENOSYS;
2610 err = sb->dq_op->get_next_id(sb, qid);
2611 if (err < 0)
2612 return err;
2613 dquot = dqget(sb, *qid);
2614 if (IS_ERR(dquot))
2615 return PTR_ERR(dquot);
2616 do_get_dqblk(dquot, di);
2617 dqput(dquot);
2618
2619 return 0;
2620 }
2621 EXPORT_SYMBOL(dquot_get_next_dqblk);
2622
2623 #define VFS_QC_MASK \
2624 (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
2625 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
2626 QC_SPC_TIMER | QC_INO_TIMER)
2627
2628 /* Generic routine for setting common part of quota structure */
2629 static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2630 {
2631 struct mem_dqblk *dm = &dquot->dq_dqb;
2632 int check_blim = 0, check_ilim = 0;
2633 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
2634
2635 if (di->d_fieldmask & ~VFS_QC_MASK)
2636 return -EINVAL;
2637
2638 if (((di->d_fieldmask & QC_SPC_SOFT) &&
2639 di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
2640 ((di->d_fieldmask & QC_SPC_HARD) &&
2641 di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
2642 ((di->d_fieldmask & QC_INO_SOFT) &&
2643 (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
2644 ((di->d_fieldmask & QC_INO_HARD) &&
2645 (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
2646 return -ERANGE;
2647
2648 spin_lock(&dq_data_lock);
2649 if (di->d_fieldmask & QC_SPACE) {
2650 dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
2651 check_blim = 1;
2652 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2653 }
2654
2655 if (di->d_fieldmask & QC_SPC_SOFT)
2656 dm->dqb_bsoftlimit = di->d_spc_softlimit;
2657 if (di->d_fieldmask & QC_SPC_HARD)
2658 dm->dqb_bhardlimit = di->d_spc_hardlimit;
2659 if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
2660 check_blim = 1;
2661 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2662 }
2663
2664 if (di->d_fieldmask & QC_INO_COUNT) {
2665 dm->dqb_curinodes = di->d_ino_count;
2666 check_ilim = 1;
2667 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2668 }
2669
2670 if (di->d_fieldmask & QC_INO_SOFT)
2671 dm->dqb_isoftlimit = di->d_ino_softlimit;
2672 if (di->d_fieldmask & QC_INO_HARD)
2673 dm->dqb_ihardlimit = di->d_ino_hardlimit;
2674 if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
2675 check_ilim = 1;
2676 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2677 }
2678
2679 if (di->d_fieldmask & QC_SPC_TIMER) {
2680 dm->dqb_btime = di->d_spc_timer;
2681 check_blim = 1;
2682 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2683 }
2684
2685 if (di->d_fieldmask & QC_INO_TIMER) {
2686 dm->dqb_itime = di->d_ino_timer;
2687 check_ilim = 1;
2688 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2689 }
2690
2691 if (check_blim) {
2692 if (!dm->dqb_bsoftlimit ||
2693 dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
2694 dm->dqb_btime = 0;
2695 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2696 } else if (!(di->d_fieldmask & QC_SPC_TIMER))
2697 /* Set grace only if user hasn't provided his own... */
2698 dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
2699 }
2700 if (check_ilim) {
2701 if (!dm->dqb_isoftlimit ||
2702 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2703 dm->dqb_itime = 0;
2704 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2705 } else if (!(di->d_fieldmask & QC_INO_TIMER))
2706 /* Set grace only if user hasn't provided his own... */
2707 dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
2708 }
2709 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2710 dm->dqb_isoftlimit)
2711 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2712 else
2713 set_bit(DQ_FAKE_B, &dquot->dq_flags);
2714 spin_unlock(&dq_data_lock);
2715 mark_dquot_dirty(dquot);
2716
2717 return 0;
2718 }
2719
2720 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2721 struct qc_dqblk *di)
2722 {
2723 struct dquot *dquot;
2724 int rc;
2725
2726 dquot = dqget(sb, qid);
2727 if (IS_ERR(dquot)) {
2728 rc = PTR_ERR(dquot);
2729 goto out;
2730 }
2731 rc = do_set_dqblk(dquot, di);
2732 dqput(dquot);
2733 out:
2734 return rc;
2735 }
2736 EXPORT_SYMBOL(dquot_set_dqblk);
2737
2738 /* Generic routine for getting common part of quota file information */
2739 int dquot_get_state(struct super_block *sb, struct qc_state *state)
2740 {
2741 struct mem_dqinfo *mi;
2742 struct qc_type_state *tstate;
2743 struct quota_info *dqopt = sb_dqopt(sb);
2744 int type;
2745
2746 memset(state, 0, sizeof(*state));
2747 for (type = 0; type < MAXQUOTAS; type++) {
2748 if (!sb_has_quota_active(sb, type))
2749 continue;
2750 tstate = state->s_state + type;
2751 mi = sb_dqopt(sb)->info + type;
2752 tstate->flags = QCI_ACCT_ENABLED;
2753 spin_lock(&dq_data_lock);
2754 if (mi->dqi_flags & DQF_SYS_FILE)
2755 tstate->flags |= QCI_SYSFILE;
2756 if (mi->dqi_flags & DQF_ROOT_SQUASH)
2757 tstate->flags |= QCI_ROOT_SQUASH;
2758 if (sb_has_quota_limits_enabled(sb, type))
2759 tstate->flags |= QCI_LIMITS_ENFORCED;
2760 tstate->spc_timelimit = mi->dqi_bgrace;
2761 tstate->ino_timelimit = mi->dqi_igrace;
2762 tstate->ino = dqopt->files[type]->i_ino;
2763 tstate->blocks = dqopt->files[type]->i_blocks;
2764 tstate->nextents = 1; /* We don't know... */
2765 spin_unlock(&dq_data_lock);
2766 }
2767 return 0;
2768 }
2769 EXPORT_SYMBOL(dquot_get_state);
2770
2771 /* Generic routine for setting common part of quota file information */
2772 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
2773 {
2774 struct mem_dqinfo *mi;
2775 int err = 0;
2776
2777 if ((ii->i_fieldmask & QC_WARNS_MASK) ||
2778 (ii->i_fieldmask & QC_RT_SPC_TIMER))
2779 return -EINVAL;
2780 if (!sb_has_quota_active(sb, type))
2781 return -ESRCH;
2782 mi = sb_dqopt(sb)->info + type;
2783 if (ii->i_fieldmask & QC_FLAGS) {
2784 if ((ii->i_flags & QCI_ROOT_SQUASH &&
2785 mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
2786 return -EINVAL;
2787 }
2788 spin_lock(&dq_data_lock);
2789 if (ii->i_fieldmask & QC_SPC_TIMER)
2790 mi->dqi_bgrace = ii->i_spc_timelimit;
2791 if (ii->i_fieldmask & QC_INO_TIMER)
2792 mi->dqi_igrace = ii->i_ino_timelimit;
2793 if (ii->i_fieldmask & QC_FLAGS) {
2794 if (ii->i_flags & QCI_ROOT_SQUASH)
2795 mi->dqi_flags |= DQF_ROOT_SQUASH;
2796 else
2797 mi->dqi_flags &= ~DQF_ROOT_SQUASH;
2798 }
2799 spin_unlock(&dq_data_lock);
2800 mark_info_dirty(sb, type);
2801 /* Force write to disk */
2802 sb->dq_op->write_info(sb, type);
2803 return err;
2804 }
2805 EXPORT_SYMBOL(dquot_set_dqinfo);
2806
2807 const struct quotactl_ops dquot_quotactl_sysfile_ops = {
2808 .quota_enable = dquot_quota_enable,
2809 .quota_disable = dquot_quota_disable,
2810 .quota_sync = dquot_quota_sync,
2811 .get_state = dquot_get_state,
2812 .set_info = dquot_set_dqinfo,
2813 .get_dqblk = dquot_get_dqblk,
2814 .get_nextdqblk = dquot_get_next_dqblk,
2815 .set_dqblk = dquot_set_dqblk
2816 };
2817 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
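/*
 * Note: filesystems that keep quota information in hidden system files
 * (and therefore set DQUOT_QUOTA_SYS_FILE) would point their superblock at
 * this table, e.g. (illustrative):
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 */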
2818
2819 static int do_proc_dqstats(struct ctl_table *table, int write,
2820 void __user *buffer, size_t *lenp, loff_t *ppos)
2821 {
2822 unsigned int type = (int *)table->data - dqstats.stat;
2823
2824 /* Update global table */
2825 dqstats.stat[type] =
2826 percpu_counter_sum_positive(&dqstats.counter[type]);
2827 return proc_dointvec(table, write, buffer, lenp, ppos);
2828 }
2829
2830 static struct ctl_table fs_dqstats_table[] = {
2831 {
2832 .procname = "lookups",
2833 .data = &dqstats.stat[DQST_LOOKUPS],
2834 .maxlen = sizeof(int),
2835 .mode = 0444,
2836 .proc_handler = do_proc_dqstats,
2837 },
2838 {
2839 .procname = "drops",
2840 .data = &dqstats.stat[DQST_DROPS],
2841 .maxlen = sizeof(int),
2842 .mode = 0444,
2843 .proc_handler = do_proc_dqstats,
2844 },
2845 {
2846 .procname = "reads",
2847 .data = &dqstats.stat[DQST_READS],
2848 .maxlen = sizeof(int),
2849 .mode = 0444,
2850 .proc_handler = do_proc_dqstats,
2851 },
2852 {
2853 .procname = "writes",
2854 .data = &dqstats.stat[DQST_WRITES],
2855 .maxlen = sizeof(int),
2856 .mode = 0444,
2857 .proc_handler = do_proc_dqstats,
2858 },
2859 {
2860 .procname = "cache_hits",
2861 .data = &dqstats.stat[DQST_CACHE_HITS],
2862 .maxlen = sizeof(int),
2863 .mode = 0444,
2864 .proc_handler = do_proc_dqstats,
2865 },
2866 {
2867 .procname = "allocated_dquots",
2868 .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
2869 .maxlen = sizeof(int),
2870 .mode = 0444,
2871 .proc_handler = do_proc_dqstats,
2872 },
2873 {
2874 .procname = "free_dquots",
2875 .data = &dqstats.stat[DQST_FREE_DQUOTS],
2876 .maxlen = sizeof(int),
2877 .mode = 0444,
2878 .proc_handler = do_proc_dqstats,
2879 },
2880 {
2881 .procname = "syncs",
2882 .data = &dqstats.stat[DQST_SYNCS],
2883 .maxlen = sizeof(int),
2884 .mode = 0444,
2885 .proc_handler = do_proc_dqstats,
2886 },
2887 #ifdef CONFIG_PRINT_QUOTA_WARNING
2888 {
2889 .procname = "warnings",
2890 .data = &flag_print_warnings,
2891 .maxlen = sizeof(int),
2892 .mode = 0644,
2893 .proc_handler = proc_dointvec,
2894 },
2895 #endif
2896 { },
2897 };
2898
2899 static struct ctl_table fs_table[] = {
2900 {
2901 .procname = "quota",
2902 .mode = 0555,
2903 .child = fs_dqstats_table,
2904 },
2905 { },
2906 };
2907
2908 static struct ctl_table sys_table[] = {
2909 {
2910 .procname = "fs",
2911 .mode = 0555,
2912 .child = fs_table,
2913 },
2914 { },
2915 };
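/*
 * The nested tables above are registered in dquot_init() below, so the
 * counters appear under /proc/sys/fs/quota/, e.g. (illustrative):
 *
 *	$ cat /proc/sys/fs/quota/allocated_dquots
 */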
2916
2917 static int __init dquot_init(void)
2918 {
2919 int i, ret;
2920 unsigned long nr_hash, order;
2921
2922 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2923
2924 register_sysctl_table(sys_table);
2925
2926 dquot_cachep = kmem_cache_create("dquot",
2927 sizeof(struct dquot), sizeof(unsigned long) * 4,
2928 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2929 SLAB_MEM_SPREAD|SLAB_PANIC),
2930 NULL);
2931
2932 order = 0;
2933 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2934 if (!dquot_hash)
2935 panic("Cannot create dquot hash table");
2936
2937 for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2938 ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
2939 if (ret)
2940 panic("Cannot create dquot stat counters");
2941 }
2942
2943 	/* Find a power-of-two number of hlist_heads which fits into the allocation */
2944 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2945 dq_hash_bits = 0;
2946 do {
2947 dq_hash_bits++;
2948 } while (nr_hash >> dq_hash_bits);
2949 dq_hash_bits--;
2950
2951 nr_hash = 1UL << dq_hash_bits;
2952 dq_hash_mask = nr_hash - 1;
2953 for (i = 0; i < nr_hash; i++)
2954 INIT_HLIST_HEAD(dquot_hash + i);
2955
2956 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
2957 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
2958
2959 register_shrinker(&dqcache_shrinker);
2960
2961 return 0;
2962 }
2963 fs_initcall(dquot_init);