// SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */
10 #include <linux/namei.h>
11 #include <linux/slab.h>
12 #include <asm/current.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/security.h>
16 #include <linux/syscalls.h>
17 #include <linux/capability.h>
18 #include <linux/quotaops.h>
19 #include <linux/types.h>
20 #include <linux/mount.h>
21 #include <linux/writeback.h>
22 #include <linux/nospec.h>
24 #include "../internal.h"
26 static int check_quotactl_permission(struct super_block
*sb
, int type
, int cmd
,
30 /* these commands do not require any special privilegues */
38 /* allow to query information for dquots we "own" */
41 if ((type
== USRQUOTA
&& uid_eq(current_euid(), make_kuid(current_user_ns(), id
))) ||
42 (type
== GRPQUOTA
&& in_egroup_p(make_kgid(current_user_ns(), id
))))
46 if (!capable(CAP_SYS_ADMIN
))
50 return security_quotactl(cmd
, type
, id
, sb
);
53 static void quota_sync_one(struct super_block
*sb
, void *arg
)
55 int type
= *(int *)arg
;
57 if (sb
->s_qcop
&& sb
->s_qcop
->quota_sync
&&
58 (sb
->s_quota_types
& (1 << type
)))
59 sb
->s_qcop
->quota_sync(sb
, type
);
62 static int quota_sync_all(int type
)
66 ret
= security_quotactl(Q_SYNC
, type
, 0, NULL
);
68 iterate_supers(quota_sync_one
, &type
);
72 unsigned int qtype_enforce_flag(int type
)
76 return FS_QUOTA_UDQ_ENFD
;
78 return FS_QUOTA_GDQ_ENFD
;
80 return FS_QUOTA_PDQ_ENFD
;
85 static int quota_quotaon(struct super_block
*sb
, int type
, qid_t id
,
86 const struct path
*path
)
88 if (!sb
->s_qcop
->quota_on
&& !sb
->s_qcop
->quota_enable
)
90 if (sb
->s_qcop
->quota_enable
)
91 return sb
->s_qcop
->quota_enable(sb
, qtype_enforce_flag(type
));
94 return sb
->s_qcop
->quota_on(sb
, type
, id
, path
);
97 static int quota_quotaoff(struct super_block
*sb
, int type
)
99 if (!sb
->s_qcop
->quota_off
&& !sb
->s_qcop
->quota_disable
)
101 if (sb
->s_qcop
->quota_disable
)
102 return sb
->s_qcop
->quota_disable(sb
, qtype_enforce_flag(type
));
103 return sb
->s_qcop
->quota_off(sb
, type
);
106 static int quota_getfmt(struct super_block
*sb
, int type
, void __user
*addr
)
110 if (!sb_has_quota_active(sb
, type
))
112 fmt
= sb_dqopt(sb
)->info
[type
].dqi_format
->qf_fmt_id
;
113 if (copy_to_user(addr
, &fmt
, sizeof(fmt
)))
118 static int quota_getinfo(struct super_block
*sb
, int type
, void __user
*addr
)
120 struct qc_state state
;
121 struct qc_type_state
*tstate
;
122 struct if_dqinfo uinfo
;
125 if (!sb
->s_qcop
->get_state
)
127 ret
= sb
->s_qcop
->get_state(sb
, &state
);
130 tstate
= state
.s_state
+ type
;
131 if (!(tstate
->flags
& QCI_ACCT_ENABLED
))
133 memset(&uinfo
, 0, sizeof(uinfo
));
134 uinfo
.dqi_bgrace
= tstate
->spc_timelimit
;
135 uinfo
.dqi_igrace
= tstate
->ino_timelimit
;
136 if (tstate
->flags
& QCI_SYSFILE
)
137 uinfo
.dqi_flags
|= DQF_SYS_FILE
;
138 if (tstate
->flags
& QCI_ROOT_SQUASH
)
139 uinfo
.dqi_flags
|= DQF_ROOT_SQUASH
;
140 uinfo
.dqi_valid
= IIF_ALL
;
141 if (copy_to_user(addr
, &uinfo
, sizeof(uinfo
)))
146 static int quota_setinfo(struct super_block
*sb
, int type
, void __user
*addr
)
148 struct if_dqinfo info
;
149 struct qc_info qinfo
;
151 if (copy_from_user(&info
, addr
, sizeof(info
)))
153 if (!sb
->s_qcop
->set_info
)
155 if (info
.dqi_valid
& ~(IIF_FLAGS
| IIF_BGRACE
| IIF_IGRACE
))
157 memset(&qinfo
, 0, sizeof(qinfo
));
158 if (info
.dqi_valid
& IIF_FLAGS
) {
159 if (info
.dqi_flags
& ~DQF_SETINFO_MASK
)
161 if (info
.dqi_flags
& DQF_ROOT_SQUASH
)
162 qinfo
.i_flags
|= QCI_ROOT_SQUASH
;
163 qinfo
.i_fieldmask
|= QC_FLAGS
;
165 if (info
.dqi_valid
& IIF_BGRACE
) {
166 qinfo
.i_spc_timelimit
= info
.dqi_bgrace
;
167 qinfo
.i_fieldmask
|= QC_SPC_TIMER
;
169 if (info
.dqi_valid
& IIF_IGRACE
) {
170 qinfo
.i_ino_timelimit
= info
.dqi_igrace
;
171 qinfo
.i_fieldmask
|= QC_INO_TIMER
;
173 return sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
176 static inline qsize_t
qbtos(qsize_t blocks
)
178 return blocks
<< QIF_DQBLKSIZE_BITS
;
181 static inline qsize_t
stoqb(qsize_t space
)
183 return (space
+ QIF_DQBLKSIZE
- 1) >> QIF_DQBLKSIZE_BITS
;
186 static void copy_to_if_dqblk(struct if_dqblk
*dst
, struct qc_dqblk
*src
)
188 memset(dst
, 0, sizeof(*dst
));
189 dst
->dqb_bhardlimit
= stoqb(src
->d_spc_hardlimit
);
190 dst
->dqb_bsoftlimit
= stoqb(src
->d_spc_softlimit
);
191 dst
->dqb_curspace
= src
->d_space
;
192 dst
->dqb_ihardlimit
= src
->d_ino_hardlimit
;
193 dst
->dqb_isoftlimit
= src
->d_ino_softlimit
;
194 dst
->dqb_curinodes
= src
->d_ino_count
;
195 dst
->dqb_btime
= src
->d_spc_timer
;
196 dst
->dqb_itime
= src
->d_ino_timer
;
197 dst
->dqb_valid
= QIF_ALL
;
200 static int quota_getquota(struct super_block
*sb
, int type
, qid_t id
,
208 if (!sb
->s_qcop
->get_dqblk
)
210 qid
= make_kqid(current_user_ns(), type
, id
);
211 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
213 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &fdq
);
216 copy_to_if_dqblk(&idq
, &fdq
);
218 if (compat_need_64bit_alignment_fixup()) {
219 struct compat_if_dqblk __user
*compat_dqblk
= addr
;
221 if (copy_to_user(compat_dqblk
, &idq
, sizeof(*compat_dqblk
)))
223 if (put_user(idq
.dqb_valid
, &compat_dqblk
->dqb_valid
))
226 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
233 * Return quota for next active quota >= this id, if any exists,
234 * otherwise return -ENOENT via ->get_nextdqblk
236 static int quota_getnextquota(struct super_block
*sb
, int type
, qid_t id
,
241 struct if_nextdqblk idq
;
244 if (!sb
->s_qcop
->get_nextdqblk
)
246 qid
= make_kqid(current_user_ns(), type
, id
);
247 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
249 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &fdq
);
252 /* struct if_nextdqblk is a superset of struct if_dqblk */
253 copy_to_if_dqblk((struct if_dqblk
*)&idq
, &fdq
);
254 idq
.dqb_id
= from_kqid(current_user_ns(), qid
);
255 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
260 static void copy_from_if_dqblk(struct qc_dqblk
*dst
, struct if_dqblk
*src
)
262 dst
->d_spc_hardlimit
= qbtos(src
->dqb_bhardlimit
);
263 dst
->d_spc_softlimit
= qbtos(src
->dqb_bsoftlimit
);
264 dst
->d_space
= src
->dqb_curspace
;
265 dst
->d_ino_hardlimit
= src
->dqb_ihardlimit
;
266 dst
->d_ino_softlimit
= src
->dqb_isoftlimit
;
267 dst
->d_ino_count
= src
->dqb_curinodes
;
268 dst
->d_spc_timer
= src
->dqb_btime
;
269 dst
->d_ino_timer
= src
->dqb_itime
;
271 dst
->d_fieldmask
= 0;
272 if (src
->dqb_valid
& QIF_BLIMITS
)
273 dst
->d_fieldmask
|= QC_SPC_SOFT
| QC_SPC_HARD
;
274 if (src
->dqb_valid
& QIF_SPACE
)
275 dst
->d_fieldmask
|= QC_SPACE
;
276 if (src
->dqb_valid
& QIF_ILIMITS
)
277 dst
->d_fieldmask
|= QC_INO_SOFT
| QC_INO_HARD
;
278 if (src
->dqb_valid
& QIF_INODES
)
279 dst
->d_fieldmask
|= QC_INO_COUNT
;
280 if (src
->dqb_valid
& QIF_BTIME
)
281 dst
->d_fieldmask
|= QC_SPC_TIMER
;
282 if (src
->dqb_valid
& QIF_ITIME
)
283 dst
->d_fieldmask
|= QC_INO_TIMER
;
286 static int quota_setquota(struct super_block
*sb
, int type
, qid_t id
,
293 if (compat_need_64bit_alignment_fixup()) {
294 struct compat_if_dqblk __user
*compat_dqblk
= addr
;
296 if (copy_from_user(&idq
, compat_dqblk
, sizeof(*compat_dqblk
)) ||
297 get_user(idq
.dqb_valid
, &compat_dqblk
->dqb_valid
))
300 if (copy_from_user(&idq
, addr
, sizeof(idq
)))
303 if (!sb
->s_qcop
->set_dqblk
)
305 qid
= make_kqid(current_user_ns(), type
, id
);
306 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
308 copy_from_if_dqblk(&fdq
, &idq
);
309 return sb
->s_qcop
->set_dqblk(sb
, qid
, &fdq
);
312 static int quota_enable(struct super_block
*sb
, void __user
*addr
)
316 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
318 if (!sb
->s_qcop
->quota_enable
)
320 return sb
->s_qcop
->quota_enable(sb
, flags
);
323 static int quota_disable(struct super_block
*sb
, void __user
*addr
)
327 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
329 if (!sb
->s_qcop
->quota_disable
)
331 return sb
->s_qcop
->quota_disable(sb
, flags
);
334 static int quota_state_to_flags(struct qc_state
*state
)
338 if (state
->s_state
[USRQUOTA
].flags
& QCI_ACCT_ENABLED
)
339 flags
|= FS_QUOTA_UDQ_ACCT
;
340 if (state
->s_state
[USRQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
341 flags
|= FS_QUOTA_UDQ_ENFD
;
342 if (state
->s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)
343 flags
|= FS_QUOTA_GDQ_ACCT
;
344 if (state
->s_state
[GRPQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
345 flags
|= FS_QUOTA_GDQ_ENFD
;
346 if (state
->s_state
[PRJQUOTA
].flags
& QCI_ACCT_ENABLED
)
347 flags
|= FS_QUOTA_PDQ_ACCT
;
348 if (state
->s_state
[PRJQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
349 flags
|= FS_QUOTA_PDQ_ENFD
;
353 static int quota_getstate(struct super_block
*sb
, int type
,
354 struct fs_quota_stat
*fqs
)
356 struct qc_state state
;
359 memset(&state
, 0, sizeof (struct qc_state
));
360 ret
= sb
->s_qcop
->get_state(sb
, &state
);
364 memset(fqs
, 0, sizeof(*fqs
));
365 fqs
->qs_version
= FS_QSTAT_VERSION
;
366 fqs
->qs_flags
= quota_state_to_flags(&state
);
367 /* No quota enabled? */
370 fqs
->qs_incoredqs
= state
.s_incoredqs
;
372 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
373 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
374 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
375 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
376 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
378 /* Inodes may be allocated even if inactive; copy out if present */
379 if (state
.s_state
[USRQUOTA
].ino
) {
380 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
381 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
382 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
384 if (state
.s_state
[GRPQUOTA
].ino
) {
385 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
386 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
387 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
389 if (state
.s_state
[PRJQUOTA
].ino
) {
391 * Q_XGETQSTAT doesn't have room for both group and project
392 * quotas. So, allow the project quota values to be copied out
393 * only if there is no group quota information available.
395 if (!(state
.s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)) {
396 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
397 fqs
->qs_gquota
.qfs_nblks
=
398 state
.s_state
[PRJQUOTA
].blocks
;
399 fqs
->qs_gquota
.qfs_nextents
=
400 state
.s_state
[PRJQUOTA
].nextents
;
406 static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user
*to
,
407 struct fs_qfilestat
*from
)
409 if (copy_to_user(to
, from
, sizeof(*to
)) ||
410 put_user(from
->qfs_nextents
, &to
->qfs_nextents
))
415 static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user
*to
,
416 struct fs_quota_stat
*from
)
418 if (put_user(from
->qs_version
, &to
->qs_version
) ||
419 put_user(from
->qs_flags
, &to
->qs_flags
) ||
420 put_user(from
->qs_pad
, &to
->qs_pad
) ||
421 compat_copy_fs_qfilestat(&to
->qs_uquota
, &from
->qs_uquota
) ||
422 compat_copy_fs_qfilestat(&to
->qs_gquota
, &from
->qs_gquota
) ||
423 put_user(from
->qs_incoredqs
, &to
->qs_incoredqs
) ||
424 put_user(from
->qs_btimelimit
, &to
->qs_btimelimit
) ||
425 put_user(from
->qs_itimelimit
, &to
->qs_itimelimit
) ||
426 put_user(from
->qs_rtbtimelimit
, &to
->qs_rtbtimelimit
) ||
427 put_user(from
->qs_bwarnlimit
, &to
->qs_bwarnlimit
) ||
428 put_user(from
->qs_iwarnlimit
, &to
->qs_iwarnlimit
))
433 static int quota_getxstate(struct super_block
*sb
, int type
, void __user
*addr
)
435 struct fs_quota_stat fqs
;
438 if (!sb
->s_qcop
->get_state
)
440 ret
= quota_getstate(sb
, type
, &fqs
);
444 if (compat_need_64bit_alignment_fixup())
445 return compat_copy_fs_quota_stat(addr
, &fqs
);
446 if (copy_to_user(addr
, &fqs
, sizeof(fqs
)))
451 static int quota_getstatev(struct super_block
*sb
, int type
,
452 struct fs_quota_statv
*fqs
)
454 struct qc_state state
;
457 memset(&state
, 0, sizeof (struct qc_state
));
458 ret
= sb
->s_qcop
->get_state(sb
, &state
);
462 memset(fqs
, 0, sizeof(*fqs
));
463 fqs
->qs_version
= FS_QSTAT_VERSION
;
464 fqs
->qs_flags
= quota_state_to_flags(&state
);
465 /* No quota enabled? */
468 fqs
->qs_incoredqs
= state
.s_incoredqs
;
470 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
471 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
472 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
473 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
474 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
475 fqs
->qs_rtbwarnlimit
= state
.s_state
[type
].rt_spc_warnlimit
;
477 /* Inodes may be allocated even if inactive; copy out if present */
478 if (state
.s_state
[USRQUOTA
].ino
) {
479 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
480 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
481 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
483 if (state
.s_state
[GRPQUOTA
].ino
) {
484 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
485 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
486 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
488 if (state
.s_state
[PRJQUOTA
].ino
) {
489 fqs
->qs_pquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
490 fqs
->qs_pquota
.qfs_nblks
= state
.s_state
[PRJQUOTA
].blocks
;
491 fqs
->qs_pquota
.qfs_nextents
= state
.s_state
[PRJQUOTA
].nextents
;
496 static int quota_getxstatev(struct super_block
*sb
, int type
, void __user
*addr
)
498 struct fs_quota_statv fqs
;
501 if (!sb
->s_qcop
->get_state
)
504 memset(&fqs
, 0, sizeof(fqs
));
505 if (copy_from_user(&fqs
, addr
, 1)) /* Just read qs_version */
508 /* If this kernel doesn't support user specified version, fail */
509 switch (fqs
.qs_version
) {
510 case FS_QSTATV_VERSION1
:
515 ret
= quota_getstatev(sb
, type
, &fqs
);
516 if (!ret
&& copy_to_user(addr
, &fqs
, sizeof(fqs
)))
522 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
523 * out of there as xfsprogs rely on definitions being in that header file. So
524 * just define same functions here for quota purposes.
526 #define XFS_BB_SHIFT 9
528 static inline u64
quota_bbtob(u64 blocks
)
530 return blocks
<< XFS_BB_SHIFT
;
533 static inline u64
quota_btobb(u64 bytes
)
535 return (bytes
+ (1 << XFS_BB_SHIFT
) - 1) >> XFS_BB_SHIFT
;
538 static inline s64
copy_from_xfs_dqblk_ts(const struct fs_disk_quota
*d
,
539 __s32 timer
, __s8 timer_hi
)
541 if (d
->d_fieldmask
& FS_DQ_BIGTIME
)
542 return (u32
)timer
| (s64
)timer_hi
<< 32;
546 static void copy_from_xfs_dqblk(struct qc_dqblk
*dst
, struct fs_disk_quota
*src
)
548 dst
->d_spc_hardlimit
= quota_bbtob(src
->d_blk_hardlimit
);
549 dst
->d_spc_softlimit
= quota_bbtob(src
->d_blk_softlimit
);
550 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
551 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
552 dst
->d_space
= quota_bbtob(src
->d_bcount
);
553 dst
->d_ino_count
= src
->d_icount
;
554 dst
->d_ino_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_itimer
,
556 dst
->d_spc_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_btimer
,
558 dst
->d_ino_warns
= src
->d_iwarns
;
559 dst
->d_spc_warns
= src
->d_bwarns
;
560 dst
->d_rt_spc_hardlimit
= quota_bbtob(src
->d_rtb_hardlimit
);
561 dst
->d_rt_spc_softlimit
= quota_bbtob(src
->d_rtb_softlimit
);
562 dst
->d_rt_space
= quota_bbtob(src
->d_rtbcount
);
563 dst
->d_rt_spc_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_rtbtimer
,
565 dst
->d_rt_spc_warns
= src
->d_rtbwarns
;
566 dst
->d_fieldmask
= 0;
567 if (src
->d_fieldmask
& FS_DQ_ISOFT
)
568 dst
->d_fieldmask
|= QC_INO_SOFT
;
569 if (src
->d_fieldmask
& FS_DQ_IHARD
)
570 dst
->d_fieldmask
|= QC_INO_HARD
;
571 if (src
->d_fieldmask
& FS_DQ_BSOFT
)
572 dst
->d_fieldmask
|= QC_SPC_SOFT
;
573 if (src
->d_fieldmask
& FS_DQ_BHARD
)
574 dst
->d_fieldmask
|= QC_SPC_HARD
;
575 if (src
->d_fieldmask
& FS_DQ_RTBSOFT
)
576 dst
->d_fieldmask
|= QC_RT_SPC_SOFT
;
577 if (src
->d_fieldmask
& FS_DQ_RTBHARD
)
578 dst
->d_fieldmask
|= QC_RT_SPC_HARD
;
579 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
580 dst
->d_fieldmask
|= QC_SPC_TIMER
;
581 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
582 dst
->d_fieldmask
|= QC_INO_TIMER
;
583 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
584 dst
->d_fieldmask
|= QC_RT_SPC_TIMER
;
585 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
586 dst
->d_fieldmask
|= QC_SPC_WARNS
;
587 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
588 dst
->d_fieldmask
|= QC_INO_WARNS
;
589 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
590 dst
->d_fieldmask
|= QC_RT_SPC_WARNS
;
591 if (src
->d_fieldmask
& FS_DQ_BCOUNT
)
592 dst
->d_fieldmask
|= QC_SPACE
;
593 if (src
->d_fieldmask
& FS_DQ_ICOUNT
)
594 dst
->d_fieldmask
|= QC_INO_COUNT
;
595 if (src
->d_fieldmask
& FS_DQ_RTBCOUNT
)
596 dst
->d_fieldmask
|= QC_RT_SPACE
;
599 static void copy_qcinfo_from_xfs_dqblk(struct qc_info
*dst
,
600 struct fs_disk_quota
*src
)
602 memset(dst
, 0, sizeof(*dst
));
603 dst
->i_spc_timelimit
= src
->d_btimer
;
604 dst
->i_ino_timelimit
= src
->d_itimer
;
605 dst
->i_rt_spc_timelimit
= src
->d_rtbtimer
;
606 dst
->i_ino_warnlimit
= src
->d_iwarns
;
607 dst
->i_spc_warnlimit
= src
->d_bwarns
;
608 dst
->i_rt_spc_warnlimit
= src
->d_rtbwarns
;
609 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
610 dst
->i_fieldmask
|= QC_SPC_WARNS
;
611 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
612 dst
->i_fieldmask
|= QC_INO_WARNS
;
613 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
614 dst
->i_fieldmask
|= QC_RT_SPC_WARNS
;
615 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
616 dst
->i_fieldmask
|= QC_SPC_TIMER
;
617 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
618 dst
->i_fieldmask
|= QC_INO_TIMER
;
619 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
620 dst
->i_fieldmask
|= QC_RT_SPC_TIMER
;
623 static int quota_setxquota(struct super_block
*sb
, int type
, qid_t id
,
626 struct fs_disk_quota fdq
;
630 if (copy_from_user(&fdq
, addr
, sizeof(fdq
)))
632 if (!sb
->s_qcop
->set_dqblk
)
634 qid
= make_kqid(current_user_ns(), type
, id
);
635 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
637 /* Are we actually setting timer / warning limits for all users? */
638 if (from_kqid(sb
->s_user_ns
, qid
) == 0 &&
639 fdq
.d_fieldmask
& (FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
)) {
640 struct qc_info qinfo
;
643 if (!sb
->s_qcop
->set_info
)
645 copy_qcinfo_from_xfs_dqblk(&qinfo
, &fdq
);
646 ret
= sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
649 /* These are already done */
650 fdq
.d_fieldmask
&= ~(FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
);
652 copy_from_xfs_dqblk(&qdq
, &fdq
);
653 return sb
->s_qcop
->set_dqblk(sb
, qid
, &qdq
);
656 static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota
*d
,
657 __s32
*timer_lo
, __s8
*timer_hi
, s64 timer
)
660 if (d
->d_fieldmask
& FS_DQ_BIGTIME
)
661 *timer_hi
= timer
>> 32;
664 static inline bool want_bigtime(s64 timer
)
666 return timer
> S32_MAX
|| timer
< S32_MIN
;
669 static void copy_to_xfs_dqblk(struct fs_disk_quota
*dst
, struct qc_dqblk
*src
,
672 memset(dst
, 0, sizeof(*dst
));
673 if (want_bigtime(src
->d_ino_timer
) || want_bigtime(src
->d_spc_timer
) ||
674 want_bigtime(src
->d_rt_spc_timer
))
675 dst
->d_fieldmask
|= FS_DQ_BIGTIME
;
676 dst
->d_version
= FS_DQUOT_VERSION
;
678 if (type
== USRQUOTA
)
679 dst
->d_flags
= FS_USER_QUOTA
;
680 else if (type
== PRJQUOTA
)
681 dst
->d_flags
= FS_PROJ_QUOTA
;
683 dst
->d_flags
= FS_GROUP_QUOTA
;
684 dst
->d_blk_hardlimit
= quota_btobb(src
->d_spc_hardlimit
);
685 dst
->d_blk_softlimit
= quota_btobb(src
->d_spc_softlimit
);
686 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
687 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
688 dst
->d_bcount
= quota_btobb(src
->d_space
);
689 dst
->d_icount
= src
->d_ino_count
;
690 copy_to_xfs_dqblk_ts(dst
, &dst
->d_itimer
, &dst
->d_itimer_hi
,
692 copy_to_xfs_dqblk_ts(dst
, &dst
->d_btimer
, &dst
->d_btimer_hi
,
694 dst
->d_iwarns
= src
->d_ino_warns
;
695 dst
->d_bwarns
= src
->d_spc_warns
;
696 dst
->d_rtb_hardlimit
= quota_btobb(src
->d_rt_spc_hardlimit
);
697 dst
->d_rtb_softlimit
= quota_btobb(src
->d_rt_spc_softlimit
);
698 dst
->d_rtbcount
= quota_btobb(src
->d_rt_space
);
699 copy_to_xfs_dqblk_ts(dst
, &dst
->d_rtbtimer
, &dst
->d_rtbtimer_hi
,
700 src
->d_rt_spc_timer
);
701 dst
->d_rtbwarns
= src
->d_rt_spc_warns
;
704 static int quota_getxquota(struct super_block
*sb
, int type
, qid_t id
,
707 struct fs_disk_quota fdq
;
712 if (!sb
->s_qcop
->get_dqblk
)
714 qid
= make_kqid(current_user_ns(), type
, id
);
715 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
717 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &qdq
);
720 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id
);
721 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
727 * Return quota for next active quota >= this id, if any exists,
728 * otherwise return -ENOENT via ->get_nextdqblk.
730 static int quota_getnextxquota(struct super_block
*sb
, int type
, qid_t id
,
733 struct fs_disk_quota fdq
;
739 if (!sb
->s_qcop
->get_nextdqblk
)
741 qid
= make_kqid(current_user_ns(), type
, id
);
742 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
744 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &qdq
);
747 id_out
= from_kqid(current_user_ns(), qid
);
748 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id_out
);
749 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
754 static int quota_rmxquota(struct super_block
*sb
, void __user
*addr
)
758 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
760 if (!sb
->s_qcop
->rm_xquota
)
762 return sb
->s_qcop
->rm_xquota(sb
, flags
);
765 /* Copy parameters and call proper function */
766 static int do_quotactl(struct super_block
*sb
, int type
, int cmd
, qid_t id
,
767 void __user
*addr
, const struct path
*path
)
771 type
= array_index_nospec(type
, MAXQUOTAS
);
773 * Quota not supported on this fs? Check this before s_quota_types
774 * since they needn't be set if quota is not supported at all.
778 if (!(sb
->s_quota_types
& (1 << type
)))
781 ret
= check_quotactl_permission(sb
, type
, cmd
, id
);
787 return quota_quotaon(sb
, type
, id
, path
);
789 return quota_quotaoff(sb
, type
);
791 return quota_getfmt(sb
, type
, addr
);
793 return quota_getinfo(sb
, type
, addr
);
795 return quota_setinfo(sb
, type
, addr
);
797 return quota_getquota(sb
, type
, id
, addr
);
799 return quota_getnextquota(sb
, type
, id
, addr
);
801 return quota_setquota(sb
, type
, id
, addr
);
803 if (!sb
->s_qcop
->quota_sync
)
805 return sb
->s_qcop
->quota_sync(sb
, type
);
807 return quota_enable(sb
, addr
);
809 return quota_disable(sb
, addr
);
811 return quota_rmxquota(sb
, addr
);
813 return quota_getxstate(sb
, type
, addr
);
815 return quota_getxstatev(sb
, type
, addr
);
817 return quota_setxquota(sb
, type
, id
, addr
);
819 return quota_getxquota(sb
, type
, id
, addr
);
820 case Q_XGETNEXTQUOTA
:
821 return quota_getnextxquota(sb
, type
, id
, addr
);
825 /* XFS quotas are fully coherent now, making this call a noop */
832 /* Return 1 if 'cmd' will block on frozen filesystem */
833 static int quotactl_cmd_write(int cmd
)
836 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
837 * as dquot_acquire() may allocate space for new structure and OCFS2
838 * needs to increment on-disk use count.
847 case Q_XGETNEXTQUOTA
:
854 /* Return true if quotactl command is manipulating quota on/off state */
855 static bool quotactl_cmd_onoff(int cmd
)
857 return (cmd
== Q_QUOTAON
) || (cmd
== Q_QUOTAOFF
) ||
858 (cmd
== Q_XQUOTAON
) || (cmd
== Q_XQUOTAOFF
);
862 * look up a superblock on which quota ops will be performed
863 * - use the name of a block device to find the superblock thereon
865 static struct super_block
*quotactl_block(const char __user
*special
, int cmd
)
868 struct super_block
*sb
;
869 struct filename
*tmp
= getname(special
);
870 bool excl
= false, thawed
= false;
875 return ERR_CAST(tmp
);
876 error
= lookup_bdev(tmp
->name
, &dev
);
879 return ERR_PTR(error
);
881 if (quotactl_cmd_onoff(cmd
)) {
884 } else if (quotactl_cmd_write(cmd
)) {
889 sb
= user_get_super(dev
, excl
);
891 return ERR_PTR(-ENODEV
);
892 if (thawed
&& sb
->s_writers
.frozen
!= SB_UNFROZEN
) {
894 up_write(&sb
->s_umount
);
896 up_read(&sb
->s_umount
);
897 wait_event(sb
->s_writers
.wait_unfrozen
,
898 sb
->s_writers
.frozen
== SB_UNFROZEN
);
905 return ERR_PTR(-ENODEV
);
910 * This is the system call interface. This communicates with
911 * the user-level programs. Currently this only supports diskquota
912 * calls. Maybe we need to add the process quotas etc. in the future,
913 * but we probably should use rlimits for that.
915 SYSCALL_DEFINE4(quotactl
, unsigned int, cmd
, const char __user
*, special
,
916 qid_t
, id
, void __user
*, addr
)
919 struct super_block
*sb
= NULL
;
920 struct path path
, *pathp
= NULL
;
923 cmds
= cmd
>> SUBCMDSHIFT
;
924 type
= cmd
& SUBCMDMASK
;
926 if (type
>= MAXQUOTAS
)
930 * As a special case Q_SYNC can be called without a specific device.
931 * It will iterate all superblocks that have quota enabled and call
932 * the sync action on each of them.
936 return quota_sync_all(type
);
941 * Path for quotaon has to be resolved before grabbing superblock
942 * because that gets s_umount sem which is also possibly needed by path
943 * resolution (think about autofs) and thus deadlocks could arise.
945 if (cmds
== Q_QUOTAON
) {
946 ret
= user_path_at(AT_FDCWD
, addr
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &path
);
948 pathp
= ERR_PTR(ret
);
953 sb
= quotactl_block(special
, cmds
);
959 ret
= do_quotactl(sb
, type
, cmds
, id
, addr
, pathp
);
961 if (!quotactl_cmd_onoff(cmds
))
964 drop_super_exclusive(sb
);
966 if (pathp
&& !IS_ERR(pathp
))
971 SYSCALL_DEFINE4(quotactl_fd
, unsigned int, fd
, unsigned int, cmd
,
972 qid_t
, id
, void __user
*, addr
)
974 struct super_block
*sb
;
975 unsigned int cmds
= cmd
>> SUBCMDSHIFT
;
976 unsigned int type
= cmd
& SUBCMDMASK
;
985 if (type
>= MAXQUOTAS
)
988 if (quotactl_cmd_write(cmds
)) {
989 ret
= mnt_want_write(f
.file
->f_path
.mnt
);
994 sb
= f
.file
->f_path
.mnt
->mnt_sb
;
995 if (quotactl_cmd_onoff(cmds
))
996 down_write(&sb
->s_umount
);
998 down_read(&sb
->s_umount
);
1000 ret
= do_quotactl(sb
, type
, cmds
, id
, addr
, ERR_PTR(-EINVAL
));
1002 if (quotactl_cmd_onoff(cmds
))
1003 up_write(&sb
->s_umount
);
1005 up_read(&sb
->s_umount
);
1007 if (quotactl_cmd_write(cmds
))
1008 mnt_drop_write(f
.file
->f_path
.mnt
);