1 | // SPDX-License-Identifier: GPL-2.0 | |
2 | /* | |
3 | * Quota code necessary even when VFS quota support is not compiled | |
4 | * into the kernel. The interesting stuff is over in dquot.c, here | |
5 | * we have symbols for initial quotactl(2) handling, the sysctl(2) | |
6 | * variables, etc - things needed even when quota support disabled. | |
7 | */ | |
8 | ||
9 | #include <linux/fs.h> | |
10 | #include <linux/namei.h> | |
11 | #include <linux/slab.h> | |
12 | #include <asm/current.h> | |
13 | #include <linux/uaccess.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/security.h> | |
16 | #include <linux/syscalls.h> | |
17 | #include <linux/capability.h> | |
18 | #include <linux/quotaops.h> | |
19 | #include <linux/types.h> | |
20 | #include <linux/mount.h> | |
21 | #include <linux/writeback.h> | |
22 | #include <linux/nospec.h> | |
23 | #include "compat.h" | |
24 | #include "../internal.h" | |
25 | ||
/*
 * Check whether the caller may perform quotactl command @cmd of quota
 * type @type for id @id on superblock @sb.
 *
 * Returns 0 when permitted (after also consulting the LSM via
 * security_quotactl()), -EPERM when a privileged command is issued
 * without CAP_SYS_ADMIN, or a negative error from the security hook.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privileges */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		/* "own" = matching euid for user quota, group membership for group quota */
		if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		fallthrough;
	default:
		/* everything else (setting limits, on/off, ...) is privileged */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}
52 | ||
53 | static void quota_sync_one(struct super_block *sb, void *arg) | |
54 | { | |
55 | int type = *(int *)arg; | |
56 | ||
57 | if (sb->s_qcop && sb->s_qcop->quota_sync && | |
58 | (sb->s_quota_types & (1 << type))) | |
59 | sb->s_qcop->quota_sync(sb, type); | |
60 | } | |
61 | ||
62 | static int quota_sync_all(int type) | |
63 | { | |
64 | int ret; | |
65 | ||
66 | ret = security_quotactl(Q_SYNC, type, 0, NULL); | |
67 | if (!ret) | |
68 | iterate_supers(quota_sync_one, &type); | |
69 | return ret; | |
70 | } | |
71 | ||
72 | unsigned int qtype_enforce_flag(int type) | |
73 | { | |
74 | switch (type) { | |
75 | case USRQUOTA: | |
76 | return FS_QUOTA_UDQ_ENFD; | |
77 | case GRPQUOTA: | |
78 | return FS_QUOTA_GDQ_ENFD; | |
79 | case PRJQUOTA: | |
80 | return FS_QUOTA_PDQ_ENFD; | |
81 | } | |
82 | return 0; | |
83 | } | |
84 | ||
/*
 * Q_QUOTAON handler.  Filesystems that manage quota state internally
 * (e.g. XFS) provide ->quota_enable and only get the enforcement flag for
 * @type turned on; the quota file @path is irrelevant for them.  Other
 * filesystems get ->quota_on with the user-supplied quota file path.
 */
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
		const struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	/* @path may carry the caller's lookup error; report it only now that we need it */
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}
96 | ||
/*
 * Q_QUOTAOFF handler.  Prefers ->quota_disable (filesystems managing
 * quota state internally) over the classic ->quota_off callback.
 */
static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}
105 | ||
/*
 * Q_GETFMT: copy the id of the active quota format for @type to
 * userspace.  -ESRCH when quota is not active for the type.
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}
117 | ||
/*
 * Q_GETINFO: translate the generic qc_state for @type into the VFS
 * if_dqinfo layout and copy it to userspace.  -ESRCH when quota
 * accounting is not enabled for the type.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;		/* user-visible result */
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	/* Translate internal QCI_* flags into user-visible DQF_* flags */
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}
145 | ||
/*
 * Q_SETINFO: read an if_dqinfo from userspace, validate which fields the
 * caller wants to set, translate into a qc_info and hand it to the
 * filesystem via ->set_info.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	/* Only flags and the two grace times may be set through this interface */
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}
175 | ||
/* Convert quota blocks (QIF_DQBLKSIZE units) to bytes. */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}
180 | ||
/* Convert bytes to quota blocks, rounding up to a full block. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
185 | ||
186 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) | |
187 | { | |
188 | memset(dst, 0, sizeof(*dst)); | |
189 | dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); | |
190 | dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); | |
191 | dst->dqb_curspace = src->d_space; | |
192 | dst->dqb_ihardlimit = src->d_ino_hardlimit; | |
193 | dst->dqb_isoftlimit = src->d_ino_softlimit; | |
194 | dst->dqb_curinodes = src->d_ino_count; | |
195 | dst->dqb_btime = src->d_spc_timer; | |
196 | dst->dqb_itime = src->d_ino_timer; | |
197 | dst->dqb_valid = QIF_ALL; | |
198 | } | |
199 | ||
/*
 * Q_GETQUOTA: fetch the dquot for (@type, @id) via ->get_dqblk and copy
 * it to userspace in if_dqblk format.  On ABIs where 64-bit members have
 * different alignment in compat mode, the trailing dqb_valid member sits
 * at a different offset, so it is re-stored separately after copying the
 * common prefix.
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	/* id must be representable in the filesystem's user namespace */
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);

	if (compat_need_64bit_alignment_fixup()) {
		struct compat_if_dqblk __user *compat_dqblk = addr;

		/* copy shared prefix, then dqb_valid at its compat offset */
		if (copy_to_user(compat_dqblk, &idq, sizeof(*compat_dqblk)))
			return -EFAULT;
		if (put_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
			return -EFAULT;
	} else {
		if (copy_to_user(addr, &idq, sizeof(idq)))
			return -EFAULT;
	}
	return 0;
}
231 | ||
232 | /* | |
233 | * Return quota for next active quota >= this id, if any exists, | |
234 | * otherwise return -ENOENT via ->get_nextdqblk | |
235 | */ | |
/*
 * Q_GETNEXTQUOTA: return quota for the next active quota >= @id, if any
 * exists; otherwise ->get_nextdqblk reports -ENOENT.  The id actually
 * found is returned in the dqb_id member of if_nextdqblk.
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* ->get_nextdqblk updates qid to the id it actually found */
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}
259 | ||
260 | static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) | |
261 | { | |
262 | dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); | |
263 | dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); | |
264 | dst->d_space = src->dqb_curspace; | |
265 | dst->d_ino_hardlimit = src->dqb_ihardlimit; | |
266 | dst->d_ino_softlimit = src->dqb_isoftlimit; | |
267 | dst->d_ino_count = src->dqb_curinodes; | |
268 | dst->d_spc_timer = src->dqb_btime; | |
269 | dst->d_ino_timer = src->dqb_itime; | |
270 | ||
271 | dst->d_fieldmask = 0; | |
272 | if (src->dqb_valid & QIF_BLIMITS) | |
273 | dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; | |
274 | if (src->dqb_valid & QIF_SPACE) | |
275 | dst->d_fieldmask |= QC_SPACE; | |
276 | if (src->dqb_valid & QIF_ILIMITS) | |
277 | dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; | |
278 | if (src->dqb_valid & QIF_INODES) | |
279 | dst->d_fieldmask |= QC_INO_COUNT; | |
280 | if (src->dqb_valid & QIF_BTIME) | |
281 | dst->d_fieldmask |= QC_SPC_TIMER; | |
282 | if (src->dqb_valid & QIF_ITIME) | |
283 | dst->d_fieldmask |= QC_INO_TIMER; | |
284 | } | |
285 | ||
/*
 * Q_SETQUOTA: read an if_dqblk from userspace (handling the compat layout
 * whose trailing dqb_valid sits at a different offset), translate it to a
 * qc_dqblk and hand it to the filesystem via ->set_dqblk.
 */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (compat_need_64bit_alignment_fixup()) {
		struct compat_if_dqblk __user *compat_dqblk = addr;

		/* copy the shared prefix, then dqb_valid from its compat offset */
		if (copy_from_user(&idq, compat_dqblk, sizeof(*compat_dqblk)) ||
		    get_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
			return -EFAULT;
	} else {
		if (copy_from_user(&idq, addr, sizeof(idq)))
			return -EFAULT;
	}
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}
311 | ||
312 | static int quota_enable(struct super_block *sb, void __user *addr) | |
313 | { | |
314 | __u32 flags; | |
315 | ||
316 | if (copy_from_user(&flags, addr, sizeof(flags))) | |
317 | return -EFAULT; | |
318 | if (!sb->s_qcop->quota_enable) | |
319 | return -ENOSYS; | |
320 | return sb->s_qcop->quota_enable(sb, flags); | |
321 | } | |
322 | ||
323 | static int quota_disable(struct super_block *sb, void __user *addr) | |
324 | { | |
325 | __u32 flags; | |
326 | ||
327 | if (copy_from_user(&flags, addr, sizeof(flags))) | |
328 | return -EFAULT; | |
329 | if (!sb->s_qcop->quota_disable) | |
330 | return -ENOSYS; | |
331 | return sb->s_qcop->quota_disable(sb, flags); | |
332 | } | |
333 | ||
334 | static int quota_state_to_flags(struct qc_state *state) | |
335 | { | |
336 | int flags = 0; | |
337 | ||
338 | if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) | |
339 | flags |= FS_QUOTA_UDQ_ACCT; | |
340 | if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED) | |
341 | flags |= FS_QUOTA_UDQ_ENFD; | |
342 | if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) | |
343 | flags |= FS_QUOTA_GDQ_ACCT; | |
344 | if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED) | |
345 | flags |= FS_QUOTA_GDQ_ENFD; | |
346 | if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) | |
347 | flags |= FS_QUOTA_PDQ_ACCT; | |
348 | if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED) | |
349 | flags |= FS_QUOTA_PDQ_ENFD; | |
350 | return flags; | |
351 | } | |
352 | ||
/*
 * Fill a v1 fs_quota_stat from the filesystem's generic qc_state.
 * Per-type limits are taken from the requested @type; quota file info
 * (inode, blocks, extents) is copied out for user and group quota when
 * the corresponding quota inode exists.  Returns -ENOSYS when no quota
 * is enabled at all.
 */
static int quota_getstate(struct super_block *sb, int type,
			  struct fs_quota_stat *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}
405 | ||
/*
 * Copy an fs_qfilestat into its compat layout: the structures share a
 * leading prefix, so copy the whole compat-sized chunk and then re-store
 * qfs_nextents at the offset it has in the compat structure.
 */
static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user *to,
				    struct fs_qfilestat *from)
{
	if (copy_to_user(to, from, sizeof(*to)) ||
	    put_user(from->qfs_nextents, &to->qfs_nextents))
		return -EFAULT;
	return 0;
}
414 | ||
/*
 * Copy a native fs_quota_stat to userspace in the compat layout,
 * member by member, since field offsets differ between the ABIs.
 */
static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user *to,
				     struct fs_quota_stat *from)
{
	if (put_user(from->qs_version, &to->qs_version) ||
	    put_user(from->qs_flags, &to->qs_flags) ||
	    put_user(from->qs_pad, &to->qs_pad) ||
	    compat_copy_fs_qfilestat(&to->qs_uquota, &from->qs_uquota) ||
	    compat_copy_fs_qfilestat(&to->qs_gquota, &from->qs_gquota) ||
	    put_user(from->qs_incoredqs, &to->qs_incoredqs) ||
	    put_user(from->qs_btimelimit, &to->qs_btimelimit) ||
	    put_user(from->qs_itimelimit, &to->qs_itimelimit) ||
	    put_user(from->qs_rtbtimelimit, &to->qs_rtbtimelimit) ||
	    put_user(from->qs_bwarnlimit, &to->qs_bwarnlimit) ||
	    put_user(from->qs_iwarnlimit, &to->qs_iwarnlimit))
		return -EFAULT;
	return 0;
}
432 | ||
/*
 * Q_XGETQSTAT: build an fs_quota_stat via quota_getstate() and copy it
 * to userspace, using the member-by-member compat copy when the ABI's
 * 64-bit alignment differs.
 */
static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, type, &fqs);
	if (ret)
		return ret;

	if (compat_need_64bit_alignment_fixup())
		return compat_copy_fs_quota_stat(addr, &fqs);
	if (copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return 0;
}
450 | ||
/*
 * Fill an fs_quota_statv (the extended Q_XGETQSTATV variant) from the
 * filesystem's generic qc_state.  Unlike fs_quota_stat, this layout has
 * a separate slot for project quota, so no group/project aliasing is
 * needed.  Returns -ENOSYS when no quota is enabled at all.
 */
static int quota_getstatev(struct super_block *sb, int type,
			   struct fs_quota_statv *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
	fqs->qs_rtbwarnlimit = state.s_state[type].rt_spc_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}
495 | ||
/*
 * Q_XGETQSTATV: versioned extended quota state.  Only the first byte of
 * the user buffer (qs_version) is read up front to decide whether this
 * kernel supports the requested structure version.
 */
static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, type, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}
520 | ||
521 | /* | |
522 | * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them | |
523 | * out of there as xfsprogs rely on definitions being in that header file. So | |
524 | * just define same functions here for quota purposes. | |
525 | */ | |
526 | #define XFS_BB_SHIFT 9 | |
527 | ||
/* Convert 512-byte basic blocks to bytes (local stand-in for XFS BBTOB). */
static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}
532 | ||
/* Convert bytes to 512-byte basic blocks, rounding up (stand-in for BTOBB). */
static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}
537 | ||
/*
 * Reassemble a 64-bit quota timer from the split on-disk lo/hi fields.
 * The 8-bit high part is only meaningful when the caller set
 * FS_DQ_BIGTIME; otherwise the 32-bit value is returned as-is.
 */
static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d,
					 __s32 timer, __s8 timer_hi)
{
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		return (u32)timer | (s64)timer_hi << 32;
	return timer;
}
545 | ||
/*
 * Translate an XFS-style fs_disk_quota (512-byte basic blocks, split
 * bigtime timers, FS_DQ_* field mask) into the generic qc_dqblk layout
 * (bytes, 64-bit timers, QC_* field mask).
 */
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = copy_from_xfs_dqblk_ts(src, src->d_itimer,
						  src->d_itimer_hi);
	dst->d_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_btimer,
						  src->d_btimer_hi);
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_rtbtimer,
						     src->d_rtbtimer_hi);
	dst->d_rt_spc_warns = src->d_rtbwarns;
	/* Translate the FS_DQ_* validity mask into QC_* field bits */
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}
598 | ||
/*
 * Extract the per-filesystem default timer and warning limits from an
 * fs_disk_quota into a qc_info.  Used by quota_setxquota() when id 0 is
 * being used to set defaults rather than a single user's limits.
 */
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}
622 | ||
/*
 * Q_XSETQLIM: set XFS-style quota limits for (@type, @id).  When id 0 is
 * used together with timer/warning fields, those fields set the
 * filesystem-wide defaults via ->set_info; the remaining fields are then
 * passed on to ->set_dqblk as usual.
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(sb->s_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}
655 | ||
/*
 * Store a 64-bit quota timer into the split on-disk lo/hi fields.  The
 * 8-bit high part is written only when the destination uses bigtime
 * timestamps (FS_DQ_BIGTIME set by the caller).
 */
static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d,
					__s32 *timer_lo, __s8 *timer_hi, s64 timer)
{
	*timer_lo = timer;
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		*timer_hi = timer >> 32;
}
663 | ||
664 | static inline bool want_bigtime(s64 timer) | |
665 | { | |
666 | return timer > S32_MAX || timer < S32_MIN; | |
667 | } | |
668 | ||
/*
 * Translate a generic qc_dqblk into the XFS-style fs_disk_quota layout
 * (basic blocks, split timers).  FS_DQ_BIGTIME is set up front when any
 * timer exceeds 32 bits so the *_hi fields get populated.
 */
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	if (want_bigtime(src->d_ino_timer) || want_bigtime(src->d_spc_timer) ||
	    want_bigtime(src->d_rt_spc_timer))
		dst->d_fieldmask |= FS_DQ_BIGTIME;
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	copy_to_xfs_dqblk_ts(dst, &dst->d_itimer, &dst->d_itimer_hi,
			     src->d_ino_timer);
	copy_to_xfs_dqblk_ts(dst, &dst->d_btimer, &dst->d_btimer_hi,
			     src->d_spc_timer);
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	copy_to_xfs_dqblk_ts(dst, &dst->d_rtbtimer, &dst->d_rtbtimer_hi,
			     src->d_rt_spc_timer);
	dst->d_rtbwarns = src->d_rt_spc_warns;
}
703 | ||
/*
 * Q_XGETQUOTA: fetch the dquot for (@type, @id) via ->get_dqblk and copy
 * it to userspace in XFS fs_disk_quota format.
 */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;	/* 0 here; get_dqblk errors returned above */
}
725 | ||
726 | /* | |
727 | * Return quota for next active quota >= this id, if any exists, | |
728 | * otherwise return -ENOENT via ->get_nextdqblk. | |
729 | */ | |
/*
 * Q_XGETNEXTQUOTA: return quota for the next active quota >= @id, if any
 * exists; otherwise ->get_nextdqblk reports -ENOENT.  The id actually
 * found is mapped back to the caller's namespace and stored in d_id.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* ->get_nextdqblk updates qid to the id it actually found */
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}
753 | ||
754 | static int quota_rmxquota(struct super_block *sb, void __user *addr) | |
755 | { | |
756 | __u32 flags; | |
757 | ||
758 | if (copy_from_user(&flags, addr, sizeof(flags))) | |
759 | return -EFAULT; | |
760 | if (!sb->s_qcop->rm_xquota) | |
761 | return -ENOSYS; | |
762 | return sb->s_qcop->rm_xquota(sb, flags); | |
763 | } | |
764 | ||
765 | /* Copy parameters and call proper function */ | |
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, const struct path *path)
{
	int ret;

	/* Clamp speculative out-of-bounds access to s_quota_types (Spectre v1) */
	type = array_index_nospec(type, MAXQUOTAS);
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	/* Dispatch to the per-command handler */
	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, type, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, type, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb_rdonly(sb))
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}
831 | ||
832 | /* Return 1 if 'cmd' will block on frozen filesystem */ | |
833 | static int quotactl_cmd_write(int cmd) | |
834 | { | |
835 | /* | |
836 | * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access | |
837 | * as dquot_acquire() may allocate space for new structure and OCFS2 | |
838 | * needs to increment on-disk use count. | |
839 | */ | |
840 | switch (cmd) { | |
841 | case Q_GETFMT: | |
842 | case Q_GETINFO: | |
843 | case Q_SYNC: | |
844 | case Q_XGETQSTAT: | |
845 | case Q_XGETQSTATV: | |
846 | case Q_XGETQUOTA: | |
847 | case Q_XGETNEXTQUOTA: | |
848 | case Q_XQUOTASYNC: | |
849 | return 0; | |
850 | } | |
851 | return 1; | |
852 | } | |
853 | ||
854 | /* Return true if quotactl command is manipulating quota on/off state */ | |
855 | static bool quotactl_cmd_onoff(int cmd) | |
856 | { | |
857 | return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) || | |
858 | (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF); | |
859 | } | |
860 | ||
/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 *
 * On success returns the superblock with s_umount held (exclusively for
 * quota on/off commands, shared otherwise); on failure returns ERR_PTR().
 * For commands that write, does not return until the fs is unfrozen.
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct super_block *sb;
	struct filename *tmp = getname(special);
	bool excl = false, thawed = false;
	int error;
	dev_t dev;

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	/* resolve the user-supplied device path to a dev_t */
	error = lookup_bdev(tmp->name, &dev);
	putname(tmp);
	if (error)
		return ERR_PTR(error);

	/*
	 * On/off commands need exclusive s_umount and a thawed fs; other
	 * writing commands only need the fs thawed.
	 */
	if (quotactl_cmd_onoff(cmd)) {
		excl = true;
		thawed = true;
	} else if (quotactl_cmd_write(cmd)) {
		thawed = true;
	}

retry:
	sb = user_get_super(dev, excl);
	if (!sb)
		return ERR_PTR(-ENODEV);
	if (thawed && sb->s_writers.frozen != SB_UNFROZEN) {
		/*
		 * Drop s_umount before sleeping — presumably the thaw path
		 * needs it, so waiting while holding it could deadlock.
		 */
		if (excl)
			up_write(&sb->s_umount);
		else
			up_read(&sb->s_umount);
		/* sleep until thawed, then redo the lookup from scratch */
		wait_event(sb->s_writers.wait_unfrozen,
			sb->s_writers.frozen == SB_UNFROZEN);
		put_super(sb);
		goto retry;
	}
	return sb;

#else
	/* no block layer: there is no device to resolve */
	return ERR_PTR(-ENODEV);
#endif
}
908 | ||
/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 *
 * 'cmd' packs both the command (upper bits) and the quota type
 * (lower SUBCMDMASK bits); 'special' names the block device, 'id'
 * is the user/group/project id and 'addr' a command-specific buffer.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	/* split the packed command word */
	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			/*
			 * Pass the lookup error down as an ERR_PTR so the
			 * Q_QUOTAON handler can report it; other commands
			 * ignore pathp.
			 */
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	/* returns the sb with s_umount held; see quotactl_block() */
	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	/* release s_umount in the mode quotactl_block() took it */
	if (!quotactl_cmd_onoff(cmds))
		drop_super(sb);
	else
		drop_super_exclusive(sb);
out:
	/* only a successful user_path_at() took a path reference */
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}
970 | ||
/*
 * quotactl_fd(2): like quotactl(2) but the filesystem is identified by
 * an open file descriptor instead of a block-device path.  The packed
 * 'cmd' word is split exactly as in sys_quotactl().
 */
SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
		qid_t, id, void __user *, addr)
{
	struct super_block *sb;
	unsigned int cmds = cmd >> SUBCMDSHIFT;
	unsigned int type = cmd & SUBCMDMASK;
	struct fd f;
	int ret;

	/* raw variant — NOTE(review): presumably to accept O_PATH fds; confirm */
	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;

	ret = -EINVAL;
	if (type >= MAXQUOTAS)
		goto out;

	/* writing commands need write access to the mount (may be r/o) */
	if (quotactl_cmd_write(cmds)) {
		ret = mnt_want_write(f.file->f_path.mnt);
		if (ret)
			goto out;
	}

	sb = f.file->f_path.mnt->mnt_sb;
	/* on/off manipulates quota state: take s_umount exclusively */
	if (quotactl_cmd_onoff(cmds))
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);

	/* no path argument in this ABI; Q_QUOTAON via fd gets -EINVAL pathp */
	ret = do_quotactl(sb, type, cmds, id, addr, ERR_PTR(-EINVAL));

	/* unlock in the same mode we locked above */
	if (quotactl_cmd_onoff(cmds))
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);

	if (quotactl_cmd_write(cmds))
		mnt_drop_write(f.file->f_path.mnt);
out:
	fdput(f);
	return ret;
}