1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/kernel/sys.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/reboot.h>
13 #include <linux/prctl.h>
14 #include <linux/highuid.h>
15 #include <linux/fs.h>
16 #include <linux/kmod.h>
17 #include <linux/perf_event.h>
18 #include <linux/resource.h>
19 #include <linux/kernel.h>
20 #include <linux/workqueue.h>
21 #include <linux/capability.h>
22 #include <linux/device.h>
23 #include <linux/key.h>
24 #include <linux/times.h>
25 #include <linux/posix-timers.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/signal.h>
31 #include <linux/cn_proc.h>
32 #include <linux/getcpu.h>
33 #include <linux/task_io_accounting_ops.h>
34 #include <linux/seccomp.h>
35 #include <linux/cpu.h>
36 #include <linux/personality.h>
37 #include <linux/ptrace.h>
38 #include <linux/fs_struct.h>
39 #include <linux/file.h>
40 #include <linux/mount.h>
41 #include <linux/gfp.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/version.h>
44 #include <linux/ctype.h>
45
46 #include <linux/compat.h>
47 #include <linux/syscalls.h>
48 #include <linux/kprobes.h>
49 #include <linux/user_namespace.h>
50 #include <linux/binfmts.h>
51
52 #include <linux/sched.h>
53 #include <linux/sched/autogroup.h>
54 #include <linux/sched/loadavg.h>
55 #include <linux/sched/stat.h>
56 #include <linux/sched/mm.h>
57 #include <linux/sched/coredump.h>
58 #include <linux/sched/task.h>
59 #include <linux/sched/cputime.h>
60 #include <linux/rcupdate.h>
61 #include <linux/uidgid.h>
62 #include <linux/cred.h>
63
64 #include <linux/nospec.h>
65
66 #include <linux/kmsg_dump.h>
67 /* Move somewhere else to avoid recompiling? */
68 #include <generated/utsrelease.h>
69
70 #include <linux/uaccess.h>
71 #include <asm/io.h>
72 #include <asm/unistd.h>
73
74 #include "uid16.h"
75
76 #ifndef SET_UNALIGN_CTL
77 # define SET_UNALIGN_CTL(a, b) (-EINVAL)
78 #endif
79 #ifndef GET_UNALIGN_CTL
80 # define GET_UNALIGN_CTL(a, b) (-EINVAL)
81 #endif
82 #ifndef SET_FPEMU_CTL
83 # define SET_FPEMU_CTL(a, b) (-EINVAL)
84 #endif
85 #ifndef GET_FPEMU_CTL
86 # define GET_FPEMU_CTL(a, b) (-EINVAL)
87 #endif
88 #ifndef SET_FPEXC_CTL
89 # define SET_FPEXC_CTL(a, b) (-EINVAL)
90 #endif
91 #ifndef GET_FPEXC_CTL
92 # define GET_FPEXC_CTL(a, b) (-EINVAL)
93 #endif
94 #ifndef GET_ENDIAN
95 # define GET_ENDIAN(a, b) (-EINVAL)
96 #endif
97 #ifndef SET_ENDIAN
98 # define SET_ENDIAN(a, b) (-EINVAL)
99 #endif
100 #ifndef GET_TSC_CTL
101 # define GET_TSC_CTL(a) (-EINVAL)
102 #endif
103 #ifndef SET_TSC_CTL
104 # define SET_TSC_CTL(a) (-EINVAL)
105 #endif
106 #ifndef GET_FP_MODE
107 # define GET_FP_MODE(a) (-EINVAL)
108 #endif
109 #ifndef SET_FP_MODE
110 # define SET_FP_MODE(a, b)	(-EINVAL)
111 #endif
112 #ifndef SVE_SET_VL
113 # define SVE_SET_VL(a) (-EINVAL)
114 #endif
115 #ifndef SVE_GET_VL
116 # define SVE_GET_VL() (-EINVAL)
117 #endif
118 #ifndef PAC_RESET_KEYS
119 # define PAC_RESET_KEYS(a, b) (-EINVAL)
120 #endif
121 #ifndef SET_TAGGED_ADDR_CTRL
122 # define SET_TAGGED_ADDR_CTRL(a) (-EINVAL)
123 #endif
124 #ifndef GET_TAGGED_ADDR_CTRL
125 # define GET_TAGGED_ADDR_CTRL() (-EINVAL)
126 #endif
127
128 /*
129 * this is where the system-wide overflow UID and GID are defined, for
130 * architectures that now have 32-bit UID/GID but didn't in the past
131 */
132
133 int overflowuid = DEFAULT_OVERFLOWUID;
134 int overflowgid = DEFAULT_OVERFLOWGID;
135
136 EXPORT_SYMBOL(overflowuid);
137 EXPORT_SYMBOL(overflowgid);
138
139 /*
140 * the same as above, but for filesystems which can only store a 16-bit
141  * UID and GID. As such, this is needed on all architectures
142 */
143
144 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
145 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
146
147 EXPORT_SYMBOL(fs_overflowuid);
148 EXPORT_SYMBOL(fs_overflowgid);
149
150 /*
151  * Returns true if current's euid is the same as p's uid or euid,
152  * or if current has CAP_SYS_NICE in p's user_ns.
153 *
154 * Called with rcu_read_lock, creds are safe
155 */
156 static bool set_one_prio_perm(struct task_struct *p)
157 {
158 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
159
160 if (uid_eq(pcred->uid, cred->euid) ||
161 uid_eq(pcred->euid, cred->euid))
162 return true;
163 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
164 return true;
165 return false;
166 }
167
168 /*
169 * set the priority of a task
170 * - the caller must hold the RCU read lock
171 */
172 static int set_one_prio(struct task_struct *p, int niceval, int error)
173 {
174 int no_nice;
175
176 if (!set_one_prio_perm(p)) {
177 error = -EPERM;
178 goto out;
179 }
180 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
181 error = -EACCES;
182 goto out;
183 }
184 no_nice = security_task_setnice(p, niceval);
185 if (no_nice) {
186 error = no_nice;
187 goto out;
188 }
189 if (error == -ESRCH)
190 error = 0;
191 set_user_nice(p, niceval);
192 out:
193 return error;
194 }
195
196 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
197 {
198 struct task_struct *g, *p;
199 struct user_struct *user;
200 const struct cred *cred = current_cred();
201 int error = -EINVAL;
202 struct pid *pgrp;
203 kuid_t uid;
204
205 if (which > PRIO_USER || which < PRIO_PROCESS)
206 goto out;
207
208 /* normalize: avoid signed division (rounding problems) */
209 error = -ESRCH;
210 if (niceval < MIN_NICE)
211 niceval = MIN_NICE;
212 if (niceval > MAX_NICE)
213 niceval = MAX_NICE;
214
215 rcu_read_lock();
216 read_lock(&tasklist_lock);
217 switch (which) {
218 case PRIO_PROCESS:
219 if (who)
220 p = find_task_by_vpid(who);
221 else
222 p = current;
223 if (p)
224 error = set_one_prio(p, niceval, error);
225 break;
226 case PRIO_PGRP:
227 if (who)
228 pgrp = find_vpid(who);
229 else
230 pgrp = task_pgrp(current);
231 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
232 error = set_one_prio(p, niceval, error);
233 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
234 break;
235 case PRIO_USER:
236 uid = make_kuid(cred->user_ns, who);
237 user = cred->user;
238 if (!who)
239 uid = cred->uid;
240 else if (!uid_eq(uid, cred->uid)) {
241 user = find_user(uid);
242 if (!user)
243 goto out_unlock; /* No processes for this user */
244 }
245 do_each_thread(g, p) {
246 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
247 error = set_one_prio(p, niceval, error);
248 } while_each_thread(g, p);
249 if (!uid_eq(uid, cred->uid))
250 free_uid(user); /* For find_user() */
251 break;
252 }
253 out_unlock:
254 read_unlock(&tasklist_lock);
255 rcu_read_unlock();
256 out:
257 return error;
258 }
259
260 /*
261 * Ugh. To avoid negative return values, "getpriority()" will
262 * not return the normal nice-value, but a negated value that
263  * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
264 * to stay compatible.
265 */
266 SYSCALL_DEFINE2(getpriority, int, which, int, who)
267 {
268 struct task_struct *g, *p;
269 struct user_struct *user;
270 const struct cred *cred = current_cred();
271 long niceval, retval = -ESRCH;
272 struct pid *pgrp;
273 kuid_t uid;
274
275 if (which > PRIO_USER || which < PRIO_PROCESS)
276 return -EINVAL;
277
278 rcu_read_lock();
279 read_lock(&tasklist_lock);
280 switch (which) {
281 case PRIO_PROCESS:
282 if (who)
283 p = find_task_by_vpid(who);
284 else
285 p = current;
286 if (p) {
287 niceval = nice_to_rlimit(task_nice(p));
288 if (niceval > retval)
289 retval = niceval;
290 }
291 break;
292 case PRIO_PGRP:
293 if (who)
294 pgrp = find_vpid(who);
295 else
296 pgrp = task_pgrp(current);
297 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
298 niceval = nice_to_rlimit(task_nice(p));
299 if (niceval > retval)
300 retval = niceval;
301 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
302 break;
303 case PRIO_USER:
304 uid = make_kuid(cred->user_ns, who);
305 user = cred->user;
306 if (!who)
307 uid = cred->uid;
308 else if (!uid_eq(uid, cred->uid)) {
309 user = find_user(uid);
310 if (!user)
311 goto out_unlock; /* No processes for this user */
312 }
313 do_each_thread(g, p) {
314 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
315 niceval = nice_to_rlimit(task_nice(p));
316 if (niceval > retval)
317 retval = niceval;
318 }
319 } while_each_thread(g, p);
320 if (!uid_eq(uid, cred->uid))
321 free_uid(user); /* for find_user() */
322 break;
323 }
324 out_unlock:
325 read_unlock(&tasklist_lock);
326 rcu_read_unlock();
327
328 return retval;
329 }
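/*
 * For illustration, a minimal userspace sketch of the 40..1 encoding
 * described above. It assumes a raw syscall(2) caller; the glibc
 * getpriority() wrapper performs this conversion by itself:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *
 *		if (ret == -1)
 *			return 1;
 *		printf("nice = %ld\n", 20 - ret);  // map 40..1 back to -20..19
 *		return 0;
 *	}
 */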
330
331 /*
332 * Unprivileged users may change the real gid to the effective gid
333 * or vice versa. (BSD-style)
334 *
335 * If you set the real gid at all, or set the effective gid to a value not
336 * equal to the real gid, then the saved gid is set to the new effective gid.
337 *
338 * This makes it possible for a setgid program to completely drop its
339 * privileges, which is often a useful assertion to make when you are doing
340 * a security audit over a program.
341 *
342 * The general idea is that a program which uses just setregid() will be
343 * 100% compatible with BSD. A program which uses just setgid() will be
344 * 100% compatible with POSIX with saved IDs.
345 *
346  * SMP: There are no races; the GIDs are checked only by filesystem
347 * operations (as far as semantic preservation is concerned).
348 */
349 #ifdef CONFIG_MULTIUSER
350 long __sys_setregid(gid_t rgid, gid_t egid)
351 {
352 struct user_namespace *ns = current_user_ns();
353 const struct cred *old;
354 struct cred *new;
355 int retval;
356 kgid_t krgid, kegid;
357
358 krgid = make_kgid(ns, rgid);
359 kegid = make_kgid(ns, egid);
360
361 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
362 return -EINVAL;
363 if ((egid != (gid_t) -1) && !gid_valid(kegid))
364 return -EINVAL;
365
366 new = prepare_creds();
367 if (!new)
368 return -ENOMEM;
369 old = current_cred();
370
371 retval = -EPERM;
372 if (rgid != (gid_t) -1) {
373 if (gid_eq(old->gid, krgid) ||
374 gid_eq(old->egid, krgid) ||
375 ns_capable(old->user_ns, CAP_SETGID))
376 new->gid = krgid;
377 else
378 goto error;
379 }
380 if (egid != (gid_t) -1) {
381 if (gid_eq(old->gid, kegid) ||
382 gid_eq(old->egid, kegid) ||
383 gid_eq(old->sgid, kegid) ||
384 ns_capable(old->user_ns, CAP_SETGID))
385 new->egid = kegid;
386 else
387 goto error;
388 }
389
390 if (rgid != (gid_t) -1 ||
391 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
392 new->sgid = new->egid;
393 new->fsgid = new->egid;
394
395 return commit_creds(new);
396
397 error:
398 abort_creds(new);
399 return retval;
400 }
401
402 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
403 {
404 return __sys_setregid(rgid, egid);
405 }
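/*
 * For illustration, how a setgid program uses the BSD semantics described
 * above to drop its group privileges completely (a sketch with an
 * illustrative helper name; a real program would also clear supplementary
 * groups with setgroups()):
 *
 *	#include <unistd.h>
 *
 *	static int drop_group_privs(void)
 *	{
 *		gid_t rgid = getgid();
 *
 *		// Setting the real gid forces the saved gid to the new
 *		// effective gid, so the privileged gid is unrecoverable.
 *		return setregid(rgid, rgid);
 *	}
 */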
406
407 /*
408 * setgid() is implemented like SysV w/ SAVED_IDS
409 *
410 * SMP: Same implicit races as above.
411 */
412 long __sys_setgid(gid_t gid)
413 {
414 struct user_namespace *ns = current_user_ns();
415 const struct cred *old;
416 struct cred *new;
417 int retval;
418 kgid_t kgid;
419
420 kgid = make_kgid(ns, gid);
421 if (!gid_valid(kgid))
422 return -EINVAL;
423
424 new = prepare_creds();
425 if (!new)
426 return -ENOMEM;
427 old = current_cred();
428
429 retval = -EPERM;
430 if (ns_capable(old->user_ns, CAP_SETGID))
431 new->gid = new->egid = new->sgid = new->fsgid = kgid;
432 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
433 new->egid = new->fsgid = kgid;
434 else
435 goto error;
436
437 return commit_creds(new);
438
439 error:
440 abort_creds(new);
441 return retval;
442 }
443
444 SYSCALL_DEFINE1(setgid, gid_t, gid)
445 {
446 return __sys_setgid(gid);
447 }
448
449 /*
450 * change the user struct in a credentials set to match the new UID
451 */
452 static int set_user(struct cred *new)
453 {
454 struct user_struct *new_user;
455
456 new_user = alloc_uid(new->uid);
457 if (!new_user)
458 return -EAGAIN;
459
460 /*
461 * We don't fail in case of NPROC limit excess here because too many
462 * poorly written programs don't check set*uid() return code, assuming
463 * it never fails if called by root. We may still enforce NPROC limit
464 * for programs doing set*uid()+execve() by harmlessly deferring the
465 * failure to the execve() stage.
466 */
467 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
468 new_user != INIT_USER)
469 current->flags |= PF_NPROC_EXCEEDED;
470 else
471 current->flags &= ~PF_NPROC_EXCEEDED;
472
473 free_uid(new->user);
474 new->user = new_user;
475 return 0;
476 }
477
478 /*
479 * Unprivileged users may change the real uid to the effective uid
480 * or vice versa. (BSD-style)
481 *
482 * If you set the real uid at all, or set the effective uid to a value not
483 * equal to the real uid, then the saved uid is set to the new effective uid.
484 *
485 * This makes it possible for a setuid program to completely drop its
486 * privileges, which is often a useful assertion to make when you are doing
487 * a security audit over a program.
488 *
489 * The general idea is that a program which uses just setreuid() will be
490 * 100% compatible with BSD. A program which uses just setuid() will be
491 * 100% compatible with POSIX with saved IDs.
492 */
493 long __sys_setreuid(uid_t ruid, uid_t euid)
494 {
495 struct user_namespace *ns = current_user_ns();
496 const struct cred *old;
497 struct cred *new;
498 int retval;
499 kuid_t kruid, keuid;
500
501 kruid = make_kuid(ns, ruid);
502 keuid = make_kuid(ns, euid);
503
504 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
505 return -EINVAL;
506 if ((euid != (uid_t) -1) && !uid_valid(keuid))
507 return -EINVAL;
508
509 new = prepare_creds();
510 if (!new)
511 return -ENOMEM;
512 old = current_cred();
513
514 retval = -EPERM;
515 if (ruid != (uid_t) -1) {
516 new->uid = kruid;
517 if (!uid_eq(old->uid, kruid) &&
518 !uid_eq(old->euid, kruid) &&
519 !ns_capable_setid(old->user_ns, CAP_SETUID))
520 goto error;
521 }
522
523 if (euid != (uid_t) -1) {
524 new->euid = keuid;
525 if (!uid_eq(old->uid, keuid) &&
526 !uid_eq(old->euid, keuid) &&
527 !uid_eq(old->suid, keuid) &&
528 !ns_capable_setid(old->user_ns, CAP_SETUID))
529 goto error;
530 }
531
532 if (!uid_eq(new->uid, old->uid)) {
533 retval = set_user(new);
534 if (retval < 0)
535 goto error;
536 }
537 if (ruid != (uid_t) -1 ||
538 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
539 new->suid = new->euid;
540 new->fsuid = new->euid;
541
542 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
543 if (retval < 0)
544 goto error;
545
546 return commit_creds(new);
547
548 error:
549 abort_creds(new);
550 return retval;
551 }
552
553 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
554 {
555 return __sys_setreuid(ruid, euid);
556 }
557
558 /*
559 * setuid() is implemented like SysV with SAVED_IDS
560 *
561  * Note that SAVED_IDS is deficient in that a setuid root program
562 * like sendmail, for example, cannot set its uid to be a normal
563 * user and then switch back, because if you're root, setuid() sets
564 * the saved uid too. If you don't like this, blame the bright people
565 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
566 * will allow a root program to temporarily drop privileges and be able to
567 * regain them by swapping the real and effective uid.
568 */
569 long __sys_setuid(uid_t uid)
570 {
571 struct user_namespace *ns = current_user_ns();
572 const struct cred *old;
573 struct cred *new;
574 int retval;
575 kuid_t kuid;
576
577 kuid = make_kuid(ns, uid);
578 if (!uid_valid(kuid))
579 return -EINVAL;
580
581 new = prepare_creds();
582 if (!new)
583 return -ENOMEM;
584 old = current_cred();
585
586 retval = -EPERM;
587 if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
588 new->suid = new->uid = kuid;
589 if (!uid_eq(kuid, old->uid)) {
590 retval = set_user(new);
591 if (retval < 0)
592 goto error;
593 }
594 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
595 goto error;
596 }
597
598 new->fsuid = new->euid = kuid;
599
600 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
601 if (retval < 0)
602 goto error;
603
604 return commit_creds(new);
605
606 error:
607 abort_creds(new);
608 return retval;
609 }
610
611 SYSCALL_DEFINE1(setuid, uid_t, uid)
612 {
613 return __sys_setuid(uid);
614 }
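/*
 * For illustration, the workaround the comment above alludes to: a root
 * process that wants to drop privileges only temporarily uses setresuid()
 * so the saved uid keeps root reachable (a sketch with illustrative helper
 * names, assuming glibc's setresuid() wrapper under _GNU_SOURCE):
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	static int drop_temporarily(uid_t uid)
 *	{
 *		return setresuid(-1, uid, -1);	// ruid and suid stay 0
 *	}
 *
 *	static int regain_root(void)
 *	{
 *		return setresuid(-1, 0, -1);	// allowed: ruid/suid are 0
 *	}
 */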
615
616
617 /*
618 * This function implements a generic ability to update ruid, euid,
619  * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
620 */
621 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
622 {
623 struct user_namespace *ns = current_user_ns();
624 const struct cred *old;
625 struct cred *new;
626 int retval;
627 kuid_t kruid, keuid, ksuid;
628
629 kruid = make_kuid(ns, ruid);
630 keuid = make_kuid(ns, euid);
631 ksuid = make_kuid(ns, suid);
632
633 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
634 return -EINVAL;
635
636 if ((euid != (uid_t) -1) && !uid_valid(keuid))
637 return -EINVAL;
638
639 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
640 return -EINVAL;
641
642 new = prepare_creds();
643 if (!new)
644 return -ENOMEM;
645
646 old = current_cred();
647
648 retval = -EPERM;
649 if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
650 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
651 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
652 goto error;
653 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
654 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
655 goto error;
656 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
657 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
658 goto error;
659 }
660
661 if (ruid != (uid_t) -1) {
662 new->uid = kruid;
663 if (!uid_eq(kruid, old->uid)) {
664 retval = set_user(new);
665 if (retval < 0)
666 goto error;
667 }
668 }
669 if (euid != (uid_t) -1)
670 new->euid = keuid;
671 if (suid != (uid_t) -1)
672 new->suid = ksuid;
673 new->fsuid = new->euid;
674
675 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
676 if (retval < 0)
677 goto error;
678
679 return commit_creds(new);
680
681 error:
682 abort_creds(new);
683 return retval;
684 }
685
686 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
687 {
688 return __sys_setresuid(ruid, euid, suid);
689 }
690
691 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
692 {
693 const struct cred *cred = current_cred();
694 int retval;
695 uid_t ruid, euid, suid;
696
697 ruid = from_kuid_munged(cred->user_ns, cred->uid);
698 euid = from_kuid_munged(cred->user_ns, cred->euid);
699 suid = from_kuid_munged(cred->user_ns, cred->suid);
700
701 retval = put_user(ruid, ruidp);
702 if (!retval) {
703 retval = put_user(euid, euidp);
704 if (!retval)
705 return put_user(suid, suidp);
706 }
707 return retval;
708 }
709
710 /*
711 * Same as above, but for rgid, egid, sgid.
712 */
713 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
714 {
715 struct user_namespace *ns = current_user_ns();
716 const struct cred *old;
717 struct cred *new;
718 int retval;
719 kgid_t krgid, kegid, ksgid;
720
721 krgid = make_kgid(ns, rgid);
722 kegid = make_kgid(ns, egid);
723 ksgid = make_kgid(ns, sgid);
724
725 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
726 return -EINVAL;
727 if ((egid != (gid_t) -1) && !gid_valid(kegid))
728 return -EINVAL;
729 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
730 return -EINVAL;
731
732 new = prepare_creds();
733 if (!new)
734 return -ENOMEM;
735 old = current_cred();
736
737 retval = -EPERM;
738 if (!ns_capable(old->user_ns, CAP_SETGID)) {
739 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
740 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
741 goto error;
742 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
743 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
744 goto error;
745 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
746 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
747 goto error;
748 }
749
750 if (rgid != (gid_t) -1)
751 new->gid = krgid;
752 if (egid != (gid_t) -1)
753 new->egid = kegid;
754 if (sgid != (gid_t) -1)
755 new->sgid = ksgid;
756 new->fsgid = new->egid;
757
758 return commit_creds(new);
759
760 error:
761 abort_creds(new);
762 return retval;
763 }
764
765 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
766 {
767 return __sys_setresgid(rgid, egid, sgid);
768 }
769
770 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
771 {
772 const struct cred *cred = current_cred();
773 int retval;
774 gid_t rgid, egid, sgid;
775
776 rgid = from_kgid_munged(cred->user_ns, cred->gid);
777 egid = from_kgid_munged(cred->user_ns, cred->egid);
778 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
779
780 retval = put_user(rgid, rgidp);
781 if (!retval) {
782 retval = put_user(egid, egidp);
783 if (!retval)
784 retval = put_user(sgid, sgidp);
785 }
786
787 return retval;
788 }
789
790
791 /*
792 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
793 * is used for "access()" and for the NFS daemon (letting nfsd stay at
794 * whatever uid it wants to). It normally shadows "euid", except when
795  * explicitly set by setfsuid() or for access().
796 */
797 long __sys_setfsuid(uid_t uid)
798 {
799 const struct cred *old;
800 struct cred *new;
801 uid_t old_fsuid;
802 kuid_t kuid;
803
804 old = current_cred();
805 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
806
807 kuid = make_kuid(old->user_ns, uid);
808 if (!uid_valid(kuid))
809 return old_fsuid;
810
811 new = prepare_creds();
812 if (!new)
813 return old_fsuid;
814
815 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
816 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
817 ns_capable_setid(old->user_ns, CAP_SETUID)) {
818 if (!uid_eq(kuid, old->fsuid)) {
819 new->fsuid = kuid;
820 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
821 goto change_okay;
822 }
823 }
824
825 abort_creds(new);
826 return old_fsuid;
827
828 change_okay:
829 commit_creds(new);
830 return old_fsuid;
831 }
832
833 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
834 {
835 return __sys_setfsuid(uid);
836 }
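/*
 * For illustration, the unusual calling convention above: setfsuid()
 * always returns the previous fsuid and never an error, so userspace
 * detects failure by probing with an invalid argument (a sketch with an
 * illustrative helper name, assuming glibc's <sys/fsuid.h> wrapper):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/fsuid.h>
 *
 *	static int try_setfsuid(uid_t uid)
 *	{
 *		setfsuid(uid);			// attempt the change
 *		// -1 is never a valid uid, so this call changes nothing
 *		// and just reads the current fsuid back.
 *		return (uid_t)setfsuid(-1) == uid ? 0 : -1;
 *	}
 */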
837
838 /*
839  * Same as setfsuid() above, but for the fsgid. (Originally: "Samma på svenska..")
840 */
841 long __sys_setfsgid(gid_t gid)
842 {
843 const struct cred *old;
844 struct cred *new;
845 gid_t old_fsgid;
846 kgid_t kgid;
847
848 old = current_cred();
849 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
850
851 kgid = make_kgid(old->user_ns, gid);
852 if (!gid_valid(kgid))
853 return old_fsgid;
854
855 new = prepare_creds();
856 if (!new)
857 return old_fsgid;
858
859 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
860 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
861 ns_capable(old->user_ns, CAP_SETGID)) {
862 if (!gid_eq(kgid, old->fsgid)) {
863 new->fsgid = kgid;
864 goto change_okay;
865 }
866 }
867
868 abort_creds(new);
869 return old_fsgid;
870
871 change_okay:
872 commit_creds(new);
873 return old_fsgid;
874 }
875
876 SYSCALL_DEFINE1(setfsgid, gid_t, gid)
877 {
878 return __sys_setfsgid(gid);
879 }
880 #endif /* CONFIG_MULTIUSER */
881
882 /**
883 * sys_getpid - return the thread group id of the current process
884 *
885  * Note, despite the name, this returns the tgid, not the pid. The tgid and
886 * the pid are identical unless CLONE_THREAD was specified on clone() in
887 * which case the tgid is the same in all threads of the same group.
888 *
889 * This is SMP safe as current->tgid does not change.
890 */
891 SYSCALL_DEFINE0(getpid)
892 {
893 return task_tgid_vnr(current);
894 }
895
896 /* Thread ID - the internal kernel "pid" */
897 SYSCALL_DEFINE0(gettid)
898 {
899 return task_pid_vnr(current);
900 }
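/*
 * For illustration: glibc gained a gettid() wrapper only in 2.30, so the
 * thread id is traditionally fetched with a raw syscall (a sketch):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static pid_t my_tid(void)
 *	{
 *		return syscall(SYS_gettid);  // equals getpid() in the main thread
 *	}
 */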
901
902 /*
903 * Accessing ->real_parent is not SMP-safe, it could
904 * change from under us. However, we can use a stale
905 * value of ->real_parent under rcu_read_lock(), see
906 * release_task()->call_rcu(delayed_put_task_struct).
907 */
908 SYSCALL_DEFINE0(getppid)
909 {
910 int pid;
911
912 rcu_read_lock();
913 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
914 rcu_read_unlock();
915
916 return pid;
917 }
918
919 SYSCALL_DEFINE0(getuid)
920 {
921 /* Only we change this so SMP safe */
922 return from_kuid_munged(current_user_ns(), current_uid());
923 }
924
925 SYSCALL_DEFINE0(geteuid)
926 {
927 /* Only we change this so SMP safe */
928 return from_kuid_munged(current_user_ns(), current_euid());
929 }
930
931 SYSCALL_DEFINE0(getgid)
932 {
933 /* Only we change this so SMP safe */
934 return from_kgid_munged(current_user_ns(), current_gid());
935 }
936
937 SYSCALL_DEFINE0(getegid)
938 {
939 /* Only we change this so SMP safe */
940 return from_kgid_munged(current_user_ns(), current_egid());
941 }
942
943 static void do_sys_times(struct tms *tms)
944 {
945 u64 tgutime, tgstime, cutime, cstime;
946
947 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
948 cutime = current->signal->cutime;
949 cstime = current->signal->cstime;
950 tms->tms_utime = nsec_to_clock_t(tgutime);
951 tms->tms_stime = nsec_to_clock_t(tgstime);
952 tms->tms_cutime = nsec_to_clock_t(cutime);
953 tms->tms_cstime = nsec_to_clock_t(cstime);
954 }
955
956 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
957 {
958 if (tbuf) {
959 struct tms tmp;
960
961 do_sys_times(&tmp);
962 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
963 return -EFAULT;
964 }
965 force_successful_syscall_return();
966 return (long) jiffies_64_to_clock_t(get_jiffies_64());
967 }
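/*
 * For illustration, the userspace side of the clock-tick units returned
 * above (a sketch with an illustrative helper name):
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	static double user_cpu_seconds(void)
 *	{
 *		struct tms t;
 *
 *		times(&t);
 *		return (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 *	}
 */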
968
969 #ifdef CONFIG_COMPAT
970 static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
971 {
972 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
973 }
974
975 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
976 {
977 if (tbuf) {
978 struct tms tms;
979 struct compat_tms tmp;
980
981 do_sys_times(&tms);
982 /* Convert our struct tms to the compat version. */
983 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
984 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
985 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
986 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
987 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
988 return -EFAULT;
989 }
990 force_successful_syscall_return();
991 return compat_jiffies_to_clock_t(jiffies);
992 }
993 #endif
994
995 /*
996 * This needs some heavy checking ...
997 * I just haven't the stomach for it. I also don't fully
998 * understand sessions/pgrp etc. Let somebody who does explain it.
999 *
1000 * OK, I think I have the protection semantics right.... this is really
1001 * only important on a multi-user system anyway, to make sure one user
1002 * can't send a signal to a process owned by another. -TYT, 12/12/91
1003 *
1004 * !PF_FORKNOEXEC check to conform completely to POSIX.
1005 */
1006 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1007 {
1008 struct task_struct *p;
1009 struct task_struct *group_leader = current->group_leader;
1010 struct pid *pgrp;
1011 int err;
1012
1013 if (!pid)
1014 pid = task_pid_vnr(group_leader);
1015 if (!pgid)
1016 pgid = pid;
1017 if (pgid < 0)
1018 return -EINVAL;
1019 rcu_read_lock();
1020
1021 /* From this point forward we keep holding onto the tasklist lock
1022 * so that our parent does not change from under us. -DaveM
1023 */
1024 write_lock_irq(&tasklist_lock);
1025
1026 err = -ESRCH;
1027 p = find_task_by_vpid(pid);
1028 if (!p)
1029 goto out;
1030
1031 err = -EINVAL;
1032 if (!thread_group_leader(p))
1033 goto out;
1034
1035 if (same_thread_group(p->real_parent, group_leader)) {
1036 err = -EPERM;
1037 if (task_session(p) != task_session(group_leader))
1038 goto out;
1039 err = -EACCES;
1040 if (!(p->flags & PF_FORKNOEXEC))
1041 goto out;
1042 } else {
1043 err = -ESRCH;
1044 if (p != group_leader)
1045 goto out;
1046 }
1047
1048 err = -EPERM;
1049 if (p->signal->leader)
1050 goto out;
1051
1052 pgrp = task_pid(p);
1053 if (pgid != pid) {
1054 struct task_struct *g;
1055
1056 pgrp = find_vpid(pgid);
1057 g = pid_task(pgrp, PIDTYPE_PGID);
1058 if (!g || task_session(g) != task_session(group_leader))
1059 goto out;
1060 }
1061
1062 err = security_task_setpgid(p, pgid);
1063 if (err)
1064 goto out;
1065
1066 if (task_pgrp(p) != pgrp)
1067 change_pid(p, PIDTYPE_PGID, pgrp);
1068
1069 err = 0;
1070 out:
1071 /* All paths lead to here, thus we are safe. -DaveM */
1072 write_unlock_irq(&tasklist_lock);
1073 rcu_read_unlock();
1074 return err;
1075 }
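/*
 * For illustration, the classic job-control idiom built on the call
 * above: both parent and child issue the same setpgid() so the result
 * does not depend on which of them runs first (a sketch with an
 * illustrative helper name):
 *
 *	#include <unistd.h>
 *
 *	static pid_t spawn_job(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0)
 *			setpgid(0, 0);		// child: new group, pgid = own pid
 *		else if (pid > 0)
 *			setpgid(pid, pid);	// parent: same call, either order wins
 *		return pid;
 *	}
 */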
1076
1077 static int do_getpgid(pid_t pid)
1078 {
1079 struct task_struct *p;
1080 struct pid *grp;
1081 int retval;
1082
1083 rcu_read_lock();
1084 if (!pid)
1085 grp = task_pgrp(current);
1086 else {
1087 retval = -ESRCH;
1088 p = find_task_by_vpid(pid);
1089 if (!p)
1090 goto out;
1091 grp = task_pgrp(p);
1092 if (!grp)
1093 goto out;
1094
1095 retval = security_task_getpgid(p);
1096 if (retval)
1097 goto out;
1098 }
1099 retval = pid_vnr(grp);
1100 out:
1101 rcu_read_unlock();
1102 return retval;
1103 }
1104
1105 SYSCALL_DEFINE1(getpgid, pid_t, pid)
1106 {
1107 return do_getpgid(pid);
1108 }
1109
1110 #ifdef __ARCH_WANT_SYS_GETPGRP
1111
1112 SYSCALL_DEFINE0(getpgrp)
1113 {
1114 return do_getpgid(0);
1115 }
1116
1117 #endif
1118
1119 SYSCALL_DEFINE1(getsid, pid_t, pid)
1120 {
1121 struct task_struct *p;
1122 struct pid *sid;
1123 int retval;
1124
1125 rcu_read_lock();
1126 if (!pid)
1127 sid = task_session(current);
1128 else {
1129 retval = -ESRCH;
1130 p = find_task_by_vpid(pid);
1131 if (!p)
1132 goto out;
1133 sid = task_session(p);
1134 if (!sid)
1135 goto out;
1136
1137 retval = security_task_getsid(p);
1138 if (retval)
1139 goto out;
1140 }
1141 retval = pid_vnr(sid);
1142 out:
1143 rcu_read_unlock();
1144 return retval;
1145 }
1146
1147 static void set_special_pids(struct pid *pid)
1148 {
1149 struct task_struct *curr = current->group_leader;
1150
1151 if (task_session(curr) != pid)
1152 change_pid(curr, PIDTYPE_SID, pid);
1153
1154 if (task_pgrp(curr) != pid)
1155 change_pid(curr, PIDTYPE_PGID, pid);
1156 }
1157
1158 int ksys_setsid(void)
1159 {
1160 struct task_struct *group_leader = current->group_leader;
1161 struct pid *sid = task_pid(group_leader);
1162 pid_t session = pid_vnr(sid);
1163 int err = -EPERM;
1164
1165 write_lock_irq(&tasklist_lock);
1166 /* Fail if I am already a session leader */
1167 if (group_leader->signal->leader)
1168 goto out;
1169
1170 /* Fail if a process group id already exists that equals the
1171 * proposed session id.
1172 */
1173 if (pid_task(sid, PIDTYPE_PGID))
1174 goto out;
1175
1176 group_leader->signal->leader = 1;
1177 set_special_pids(sid);
1178
1179 proc_clear_tty(group_leader);
1180
1181 err = session;
1182 out:
1183 write_unlock_irq(&tasklist_lock);
1184 if (err > 0) {
1185 proc_sid_connector(group_leader);
1186 sched_autogroup_create_attach(group_leader);
1187 }
1188 return err;
1189 }
1190
1191 SYSCALL_DEFINE0(setsid)
1192 {
1193 return ksys_setsid();
1194 }
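/*
 * For illustration: ksys_setsid() fails for a process group leader, which
 * is why daemons fork() first so the child is guaranteed not to lead a
 * group before calling setsid() (a sketch with an illustrative helper
 * name):
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void daemonize_step1(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			exit(1);
 *		if (pid > 0)
 *			exit(0);	// parent exits
 *		if (setsid() < 0)	// child becomes session leader
 *			exit(1);
 *	}
 */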
1195
1196 DECLARE_RWSEM(uts_sem);
1197
1198 #ifdef COMPAT_UTS_MACHINE
1199 static char compat_uts_machine[__OLD_UTS_LEN+1] = COMPAT_UTS_MACHINE;
1200
1201 static int __init parse_compat_uts_machine(char *arg)
1202 {
1203 strncpy(compat_uts_machine, arg, __OLD_UTS_LEN);
1204 compat_uts_machine[__OLD_UTS_LEN] = 0;
1205 return 0;
1206 }
1207 early_param("compat_uts_machine", parse_compat_uts_machine);
1208
1209 #undef COMPAT_UTS_MACHINE
1210 #define COMPAT_UTS_MACHINE compat_uts_machine
1211 #endif
1212
1213 #ifdef COMPAT_UTS_MACHINE
1214 #define override_architecture(name) \
1215 (personality(current->personality) == PER_LINUX32 && \
1216 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1217 sizeof(COMPAT_UTS_MACHINE)))
1218 #else
1219 #define override_architecture(name) 0
1220 #endif
1221
1222 /*
1223 * Work around broken programs that cannot handle "Linux 3.0".
1224  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
1225  * and we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1226 * 2.6.60.
1227 */
1228 static int override_release(char __user *release, size_t len)
1229 {
1230 int ret = 0;
1231
1232 if (current->personality & UNAME26) {
1233 const char *rest = UTS_RELEASE;
1234 char buf[65] = { 0 };
1235 int ndots = 0;
1236 unsigned v;
1237 size_t copy;
1238
1239 while (*rest) {
1240 if (*rest == '.' && ++ndots >= 3)
1241 break;
1242 if (!isdigit(*rest) && *rest != '.')
1243 break;
1244 rest++;
1245 }
1246 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1247 copy = clamp_t(size_t, len, 1, sizeof(buf));
1248 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1249 ret = copy_to_user(release, buf, copy + 1);
1250 }
1251 return ret;
1252 }
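/*
 * For illustration, the mapping above is only applied when the UNAME26
 * personality bit is set (a sketch, assuming <sys/personality.h> exposes
 * UNAME26):
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(PER_LINUX | UNAME26);
 *		uname(&u);
 *		printf("%s\n", u.release);	// e.g. "2.6.64" on a 5.4 kernel
 *		return 0;
 *	}
 */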
1253
1254 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1255 {
1256 struct new_utsname tmp;
1257
1258 down_read(&uts_sem);
1259 memcpy(&tmp, utsname(), sizeof(tmp));
1260 up_read(&uts_sem);
1261 if (copy_to_user(name, &tmp, sizeof(tmp)))
1262 return -EFAULT;
1263
1264 if (override_release(name->release, sizeof(name->release)))
1265 return -EFAULT;
1266 if (override_architecture(name))
1267 return -EFAULT;
1268 return 0;
1269 }
1270
1271 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1272 /*
1273 * Old cruft
1274 */
1275 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1276 {
1277 struct old_utsname tmp;
1278
1279 if (!name)
1280 return -EFAULT;
1281
1282 down_read(&uts_sem);
1283 memcpy(&tmp, utsname(), sizeof(tmp));
1284 up_read(&uts_sem);
1285 if (copy_to_user(name, &tmp, sizeof(tmp)))
1286 return -EFAULT;
1287
1288 if (override_release(name->release, sizeof(name->release)))
1289 return -EFAULT;
1290 if (override_architecture(name))
1291 return -EFAULT;
1292 return 0;
1293 }
1294
1295 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1296 {
1297 struct oldold_utsname tmp;
1298
1299 if (!name)
1300 return -EFAULT;
1301
1302 memset(&tmp, 0, sizeof(tmp));
1303
1304 down_read(&uts_sem);
1305 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1306 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1307 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1308 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1309 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1310 up_read(&uts_sem);
1311 if (copy_to_user(name, &tmp, sizeof(tmp)))
1312 return -EFAULT;
1313
1314 if (override_architecture(name))
1315 return -EFAULT;
1316 if (override_release(name->release, sizeof(name->release)))
1317 return -EFAULT;
1318 return 0;
1319 }
1320 #endif
1321
1322 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1323 {
1324 int errno;
1325 char tmp[__NEW_UTS_LEN];
1326
1327 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1328 return -EPERM;
1329
1330 if (len < 0 || len > __NEW_UTS_LEN)
1331 return -EINVAL;
1332 errno = -EFAULT;
1333 if (!copy_from_user(tmp, name, len)) {
1334 struct new_utsname *u;
1335
1336 down_write(&uts_sem);
1337 u = utsname();
1338 memcpy(u->nodename, tmp, len);
1339 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1340 errno = 0;
1341 uts_proc_notify(UTS_PROC_HOSTNAME);
1342 up_write(&uts_sem);
1343 }
1344 return errno;
1345 }
1346
1347 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1348
1349 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1350 {
1351 int i;
1352 struct new_utsname *u;
1353 char tmp[__NEW_UTS_LEN + 1];
1354
1355 if (len < 0)
1356 return -EINVAL;
1357 down_read(&uts_sem);
1358 u = utsname();
1359 i = 1 + strlen(u->nodename);
1360 if (i > len)
1361 i = len;
1362 memcpy(tmp, u->nodename, i);
1363 up_read(&uts_sem);
1364 if (copy_to_user(name, tmp, i))
1365 return -EFAULT;
1366 return 0;
1367 }
1368
1369 #endif
1370
1371 /*
1372 * Only setdomainname; getdomainname can be implemented by calling
1373 * uname()
1374 */
1375 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1376 {
1377 int errno;
1378 char tmp[__NEW_UTS_LEN];
1379
1380 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1381 return -EPERM;
1382 if (len < 0 || len > __NEW_UTS_LEN)
1383 return -EINVAL;
1384
1385 errno = -EFAULT;
1386 if (!copy_from_user(tmp, name, len)) {
1387 struct new_utsname *u;
1388
1389 down_write(&uts_sem);
1390 u = utsname();
1391 memcpy(u->domainname, tmp, len);
1392 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1393 errno = 0;
1394 uts_proc_notify(UTS_PROC_DOMAINNAME);
1395 up_write(&uts_sem);
1396 }
1397 return errno;
1398 }
1399
1400 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1401 {
1402 struct rlimit value;
1403 int ret;
1404
1405 ret = do_prlimit(current, resource, NULL, &value);
1406 if (!ret)
1407 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1408
1409 return ret;
1410 }
1411
1412 #ifdef CONFIG_COMPAT
1413
1414 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1415 struct compat_rlimit __user *, rlim)
1416 {
1417 struct rlimit r;
1418 struct compat_rlimit r32;
1419
1420 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1421 return -EFAULT;
1422
1423 if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1424 r.rlim_cur = RLIM_INFINITY;
1425 else
1426 r.rlim_cur = r32.rlim_cur;
1427 if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1428 r.rlim_max = RLIM_INFINITY;
1429 else
1430 r.rlim_max = r32.rlim_max;
1431 return do_prlimit(current, resource, &r, NULL);
1432 }
1433
1434 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1435 struct compat_rlimit __user *, rlim)
1436 {
1437 struct rlimit r;
1438 int ret;
1439
1440 ret = do_prlimit(current, resource, NULL, &r);
1441 if (!ret) {
1442 struct compat_rlimit r32;
1443 if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1444 r32.rlim_cur = COMPAT_RLIM_INFINITY;
1445 else
1446 r32.rlim_cur = r.rlim_cur;
1447 if (r.rlim_max > COMPAT_RLIM_INFINITY)
1448 r32.rlim_max = COMPAT_RLIM_INFINITY;
1449 else
1450 r32.rlim_max = r.rlim_max;
1451
1452 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1453 return -EFAULT;
1454 }
1455 return ret;
1456 }
1457
1458 #endif
1459
1460 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1461
1462 /*
1463  * Backward compatibility for getrlimit. Needed for some apps.
1464 */
1465 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1466 struct rlimit __user *, rlim)
1467 {
1468 struct rlimit x;
1469 if (resource >= RLIM_NLIMITS)
1470 return -EINVAL;
1471
1472 resource = array_index_nospec(resource, RLIM_NLIMITS);
1473 task_lock(current->group_leader);
1474 x = current->signal->rlim[resource];
1475 task_unlock(current->group_leader);
1476 if (x.rlim_cur > 0x7FFFFFFF)
1477 x.rlim_cur = 0x7FFFFFFF;
1478 if (x.rlim_max > 0x7FFFFFFF)
1479 x.rlim_max = 0x7FFFFFFF;
1480 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1481 }
1482
1483 #ifdef CONFIG_COMPAT
1484 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1485 struct compat_rlimit __user *, rlim)
1486 {
1487 struct rlimit r;
1488
1489 if (resource >= RLIM_NLIMITS)
1490 return -EINVAL;
1491
1492 resource = array_index_nospec(resource, RLIM_NLIMITS);
1493 task_lock(current->group_leader);
1494 r = current->signal->rlim[resource];
1495 task_unlock(current->group_leader);
1496 if (r.rlim_cur > 0x7FFFFFFF)
1497 r.rlim_cur = 0x7FFFFFFF;
1498 if (r.rlim_max > 0x7FFFFFFF)
1499 r.rlim_max = 0x7FFFFFFF;
1500
1501 if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1502 put_user(r.rlim_max, &rlim->rlim_max))
1503 return -EFAULT;
1504 return 0;
1505 }
1506 #endif
1507
1508 #endif
1509
1510 static inline bool rlim64_is_infinity(__u64 rlim64)
1511 {
1512 #if BITS_PER_LONG < 64
1513 return rlim64 >= ULONG_MAX;
1514 #else
1515 return rlim64 == RLIM64_INFINITY;
1516 #endif
1517 }
1518
1519 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1520 {
1521 if (rlim->rlim_cur == RLIM_INFINITY)
1522 rlim64->rlim_cur = RLIM64_INFINITY;
1523 else
1524 rlim64->rlim_cur = rlim->rlim_cur;
1525 if (rlim->rlim_max == RLIM_INFINITY)
1526 rlim64->rlim_max = RLIM64_INFINITY;
1527 else
1528 rlim64->rlim_max = rlim->rlim_max;
1529 }
1530
1531 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1532 {
1533 if (rlim64_is_infinity(rlim64->rlim_cur))
1534 rlim->rlim_cur = RLIM_INFINITY;
1535 else
1536 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1537 if (rlim64_is_infinity(rlim64->rlim_max))
1538 rlim->rlim_max = RLIM_INFINITY;
1539 else
1540 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1541 }
1542
1543 /* make sure you are allowed to change @tsk limits before calling this */
1544 int do_prlimit(struct task_struct *tsk, unsigned int resource,
1545 struct rlimit *new_rlim, struct rlimit *old_rlim)
1546 {
1547 struct rlimit *rlim;
1548 int retval = 0;
1549
1550 if (resource >= RLIM_NLIMITS)
1551 return -EINVAL;
1552 if (new_rlim) {
1553 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1554 return -EINVAL;
1555 if (resource == RLIMIT_NOFILE &&
1556 new_rlim->rlim_max > sysctl_nr_open)
1557 return -EPERM;
1558 }
1559
1560 /* protect tsk->signal and tsk->sighand from disappearing */
1561 read_lock(&tasklist_lock);
1562 if (!tsk->sighand) {
1563 retval = -ESRCH;
1564 goto out;
1565 }
1566
1567 rlim = tsk->signal->rlim + resource;
1568 task_lock(tsk->group_leader);
1569 if (new_rlim) {
1570 /* Keep the capable check against init_user_ns until
1571 cgroups can contain all limits */
1572 if (new_rlim->rlim_max > rlim->rlim_max &&
1573 !capable(CAP_SYS_RESOURCE))
1574 retval = -EPERM;
1575 if (!retval)
1576 retval = security_task_setrlimit(tsk, resource, new_rlim);
1577 }
1578 if (!retval) {
1579 if (old_rlim)
1580 *old_rlim = *rlim;
1581 if (new_rlim)
1582 *rlim = *new_rlim;
1583 }
1584 task_unlock(tsk->group_leader);
1585
1586 /*
1587 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1588  * infinite. In case of RLIM_INFINITY the posix CPU timer code
1589 * ignores the rlimit.
1590 */
1591 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1592 new_rlim->rlim_cur != RLIM_INFINITY &&
1593 IS_ENABLED(CONFIG_POSIX_TIMERS))
1594 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1595 out:
1596 read_unlock(&tasklist_lock);
1597 return retval;
1598 }
1599
1600 /* rcu lock must be held */
1601 static int check_prlimit_permission(struct task_struct *task,
1602 unsigned int flags)
1603 {
1604 const struct cred *cred = current_cred(), *tcred;
1605 bool id_match;
1606
1607 if (current == task)
1608 return 0;
1609
1610 tcred = __task_cred(task);
1611 id_match = (uid_eq(cred->uid, tcred->euid) &&
1612 uid_eq(cred->uid, tcred->suid) &&
1613 uid_eq(cred->uid, tcred->uid) &&
1614 gid_eq(cred->gid, tcred->egid) &&
1615 gid_eq(cred->gid, tcred->sgid) &&
1616 gid_eq(cred->gid, tcred->gid));
1617 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1618 return -EPERM;
1619
1620 return security_task_prlimit(cred, tcred, flags);
1621 }
1622
1623 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1624 const struct rlimit64 __user *, new_rlim,
1625 struct rlimit64 __user *, old_rlim)
1626 {
1627 struct rlimit64 old64, new64;
1628 struct rlimit old, new;
1629 struct task_struct *tsk;
1630 unsigned int checkflags = 0;
1631 int ret;
1632
1633 if (old_rlim)
1634 checkflags |= LSM_PRLIMIT_READ;
1635
1636 if (new_rlim) {
1637 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1638 return -EFAULT;
1639 rlim64_to_rlim(&new64, &new);
1640 checkflags |= LSM_PRLIMIT_WRITE;
1641 }
1642
1643 rcu_read_lock();
1644 tsk = pid ? find_task_by_vpid(pid) : current;
1645 if (!tsk) {
1646 rcu_read_unlock();
1647 return -ESRCH;
1648 }
1649 ret = check_prlimit_permission(tsk, checkflags);
1650 if (ret) {
1651 rcu_read_unlock();
1652 return ret;
1653 }
1654 get_task_struct(tsk);
1655 rcu_read_unlock();
1656
1657 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1658 old_rlim ? &old : NULL);
1659
1660 if (!ret && old_rlim) {
1661 rlim_to_rlim64(&old, &old64);
1662 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1663 ret = -EFAULT;
1664 }
1665
1666 put_task_struct(tsk);
1667 return ret;
1668 }
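/*
 * For illustration, userspace reaches this syscall through glibc's
 * prlimit(), which can read and write another process' limits in one
 * call (a sketch; cap_nofile() is an illustrative helper, not a real
 * API):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *
 *	static int cap_nofile(pid_t pid, rlim_t max)
 *	{
 *		struct rlimit new_lim = { .rlim_cur = max, .rlim_max = max };
 *		struct rlimit old_lim;
 *
 *		return prlimit(pid, RLIMIT_NOFILE, &new_lim, &old_lim);
 *	}
 */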
1669
1670 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1671 {
1672 struct rlimit new_rlim;
1673
1674 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1675 return -EFAULT;
1676 return do_prlimit(current, resource, &new_rlim, NULL);
1677 }
1678
1679 /*
1680 * It would make sense to put struct rusage in the task_struct,
1681 * except that would make the task_struct be *really big*. After
1682 * task_struct gets moved into malloc'ed memory, it would
1683 * make sense to do this. It will make moving the rest of the information
1684 * a lot simpler! (Which we're not doing right now because we're not
1685 * measuring them yet).
1686 *
1687 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1688 * races with threads incrementing their own counters. But since word
1689 * reads are atomic, we either get new values or old values and we don't
1690 * care which for the sums. We always take the siglock to protect reading
1691 * the c* fields from p->signal from races with exit.c updating those
1692 * fields when reaping, so a sample either gets all the additions of a
1693 * given child after it's reaped, or none so this sample is before reaping.
1694 *
1695 * Locking:
1696  * We need to take the siglock for CHILDREN, SELF and BOTH
1697  * for the cases: current multithreaded, non-current single-threaded,
1698  * and non-current multithreaded. Thread traversal is now safe with
1699  * the siglock held.
1700  * Strictly speaking, we do not need to take the siglock if we are current and
1701 * single threaded, as no one else can take our signal_struct away, no one
1702 * else can reap the children to update signal->c* counters, and no one else
1703 * can race with the signal-> fields. If we do not take any lock, the
1704 * signal-> fields could be read out of order while another thread was just
1705 * exiting. So we should place a read memory barrier when we avoid the lock.
1706 * On the writer side, write memory barrier is implied in __exit_signal
1707 * as __exit_signal releases the siglock spinlock after updating the signal->
1708 * fields. But we don't do this yet to keep things simple.
1709 *
1710 */
1711
1712 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1713 {
1714 r->ru_nvcsw += t->nvcsw;
1715 r->ru_nivcsw += t->nivcsw;
1716 r->ru_minflt += t->min_flt;
1717 r->ru_majflt += t->maj_flt;
1718 r->ru_inblock += task_io_get_inblock(t);
1719 r->ru_oublock += task_io_get_oublock(t);
1720 }
1721
1722 void getrusage(struct task_struct *p, int who, struct rusage *r)
1723 {
1724 struct task_struct *t;
1725 unsigned long flags;
1726 u64 tgutime, tgstime, utime, stime;
1727 unsigned long maxrss = 0;
1728
1729 memset((char *)r, 0, sizeof (*r));
1730 utime = stime = 0;
1731
1732 if (who == RUSAGE_THREAD) {
1733 task_cputime_adjusted(current, &utime, &stime);
1734 accumulate_thread_rusage(p, r);
1735 maxrss = p->signal->maxrss;
1736 goto out;
1737 }
1738
1739 if (!lock_task_sighand(p, &flags))
1740 return;
1741
1742 switch (who) {
1743 case RUSAGE_BOTH:
1744 case RUSAGE_CHILDREN:
1745 utime = p->signal->cutime;
1746 stime = p->signal->cstime;
1747 r->ru_nvcsw = p->signal->cnvcsw;
1748 r->ru_nivcsw = p->signal->cnivcsw;
1749 r->ru_minflt = p->signal->cmin_flt;
1750 r->ru_majflt = p->signal->cmaj_flt;
1751 r->ru_inblock = p->signal->cinblock;
1752 r->ru_oublock = p->signal->coublock;
1753 maxrss = p->signal->cmaxrss;
1754
1755 if (who == RUSAGE_CHILDREN)
1756 break;
1757 /* fall through */
1758
1759 case RUSAGE_SELF:
1760 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1761 utime += tgutime;
1762 stime += tgstime;
1763 r->ru_nvcsw += p->signal->nvcsw;
1764 r->ru_nivcsw += p->signal->nivcsw;
1765 r->ru_minflt += p->signal->min_flt;
1766 r->ru_majflt += p->signal->maj_flt;
1767 r->ru_inblock += p->signal->inblock;
1768 r->ru_oublock += p->signal->oublock;
1769 if (maxrss < p->signal->maxrss)
1770 maxrss = p->signal->maxrss;
1771 t = p;
1772 do {
1773 accumulate_thread_rusage(t, r);
1774 } while_each_thread(p, t);
1775 break;
1776
1777 default:
1778 BUG();
1779 }
1780 unlock_task_sighand(p, &flags);
1781
1782 out:
1783 r->ru_utime = ns_to_timeval(utime);
1784 r->ru_stime = ns_to_timeval(stime);
1785
1786 if (who != RUSAGE_CHILDREN) {
1787 struct mm_struct *mm = get_task_mm(p);
1788
1789 if (mm) {
1790 setmax_mm_hiwater_rss(&maxrss, mm);
1791 mmput(mm);
1792 }
1793 }
1794 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1795 }
1796
1797 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1798 {
1799 struct rusage r;
1800
1801 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1802 who != RUSAGE_THREAD)
1803 return -EINVAL;
1804
1805 getrusage(current, who, &r);
1806 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1807 }
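/*
 * For illustration, a typical userspace consumer of the maxrss value
 * computed above (a sketch; on Linux ru_maxrss is in kilobytes):
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		getrusage(RUSAGE_SELF, &ru);
 *		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 *		return 0;
 *	}
 */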
1808
1809 #ifdef CONFIG_COMPAT
1810 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1811 {
1812 struct rusage r;
1813
1814 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1815 who != RUSAGE_THREAD)
1816 return -EINVAL;
1817
1818 getrusage(current, who, &r);
1819 return put_compat_rusage(&r, ru);
1820 }
1821 #endif
1822
1823 SYSCALL_DEFINE1(umask, int, mask)
1824 {
1825 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1826 return mask;
1827 }
1828
1829 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1830 {
1831 struct fd exe;
1832 struct file *old_exe, *exe_file;
1833 struct inode *inode;
1834 int err;
1835
1836 exe = fdget(fd);
1837 if (!exe.file)
1838 return -EBADF;
1839
1840 inode = file_inode(exe.file);
1841
1842 /*
1843  * Because the original mm->exe_file points to an executable file, make
1844  * sure that this one is executable as well, to avoid breaking the
1845  * overall picture.
1846 */
1847 err = -EACCES;
1848 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1849 goto exit;
1850
1851 err = inode_permission(inode, MAY_EXEC);
1852 if (err)
1853 goto exit;
1854
1855 /*
1856 * Forbid mm->exe_file change if old file still mapped.
1857 */
1858 exe_file = get_mm_exe_file(mm);
1859 err = -EBUSY;
1860 if (exe_file) {
1861 struct vm_area_struct *vma;
1862
1863 down_read(&mm->mmap_sem);
1864 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1865 if (!vma->vm_file)
1866 continue;
1867 if (path_equal(&vma->vm_file->f_path,
1868 &exe_file->f_path))
1869 goto exit_err;
1870 }
1871
1872 up_read(&mm->mmap_sem);
1873 fput(exe_file);
1874 }
1875
1876 err = 0;
1877 /* set the new file, lockless */
1878 get_file(exe.file);
1879 old_exe = xchg(&mm->exe_file, exe.file);
1880 if (old_exe)
1881 fput(old_exe);
1882 exit:
1883 fdput(exe);
1884 return err;
1885 exit_err:
1886 up_read(&mm->mmap_sem);
1887 fput(exe_file);
1888 goto exit;
1889 }
1890
1891 /*
1892 * Check arithmetic relations of passed addresses.
1893 *
1894 * WARNING: we don't require any capability here so be very careful
1895 * in what is allowed for modification from userspace.
1896 */
1897 static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1898 {
1899 unsigned long mmap_max_addr = TASK_SIZE;
1900 int error = -EINVAL, i;
1901
1902 static const unsigned char offsets[] = {
1903 offsetof(struct prctl_mm_map, start_code),
1904 offsetof(struct prctl_mm_map, end_code),
1905 offsetof(struct prctl_mm_map, start_data),
1906 offsetof(struct prctl_mm_map, end_data),
1907 offsetof(struct prctl_mm_map, start_brk),
1908 offsetof(struct prctl_mm_map, brk),
1909 offsetof(struct prctl_mm_map, start_stack),
1910 offsetof(struct prctl_mm_map, arg_start),
1911 offsetof(struct prctl_mm_map, arg_end),
1912 offsetof(struct prctl_mm_map, env_start),
1913 offsetof(struct prctl_mm_map, env_end),
1914 };
1915
1916 /*
1917 * Make sure the members are not somewhere outside
1918 * of allowed address space.
1919 */
1920 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1921 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1922
1923 if ((unsigned long)val >= mmap_max_addr ||
1924 (unsigned long)val < mmap_min_addr)
1925 goto out;
1926 }
1927
1928 /*
1929 * Make sure the pairs are ordered.
1930 */
1931 #define __prctl_check_order(__m1, __op, __m2) \
1932 ((unsigned long)prctl_map->__m1 __op \
1933 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1934 error = __prctl_check_order(start_code, <, end_code);
1935 	error |= __prctl_check_order(start_data, <=, end_data);
1936 error |= __prctl_check_order(start_brk, <=, brk);
1937 error |= __prctl_check_order(arg_start, <=, arg_end);
1938 error |= __prctl_check_order(env_start, <=, env_end);
1939 if (error)
1940 goto out;
1941 #undef __prctl_check_order
1942
1943 error = -EINVAL;
1944
1945 /*
1946 	 * Nor should we allow overriding limits if they are set.
1947 */
1948 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1949 prctl_map->start_brk, prctl_map->end_data,
1950 prctl_map->start_data))
1951 goto out;
1952
1953 error = 0;
1954 out:
1955 return error;
1956 }
1957
1958 #ifdef CONFIG_CHECKPOINT_RESTORE
1959 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1960 {
1961 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1962 unsigned long user_auxv[AT_VECTOR_SIZE];
1963 struct mm_struct *mm = current->mm;
1964 int error;
1965
1966 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1967 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1968
1969 if (opt == PR_SET_MM_MAP_SIZE)
1970 return put_user((unsigned int)sizeof(prctl_map),
1971 (unsigned int __user *)addr);
1972
1973 if (data_size != sizeof(prctl_map))
1974 return -EINVAL;
1975
1976 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1977 return -EFAULT;
1978
1979 error = validate_prctl_map_addr(&prctl_map);
1980 if (error)
1981 return error;
1982
1983 if (prctl_map.auxv_size) {
1984 /*
1985 * Someone is trying to cheat the auxv vector.
1986 */
1987 if (!prctl_map.auxv ||
1988 prctl_map.auxv_size > sizeof(mm->saved_auxv))
1989 return -EINVAL;
1990
1991 memset(user_auxv, 0, sizeof(user_auxv));
1992 if (copy_from_user(user_auxv,
1993 (const void __user *)prctl_map.auxv,
1994 prctl_map.auxv_size))
1995 return -EFAULT;
1996
1997 /* Last entry must be AT_NULL as specification requires */
1998 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1999 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2000 }
2001
2002 if (prctl_map.exe_fd != (u32)-1) {
2003 /*
2004 * Make sure the caller has the rights to
2005 * change /proc/pid/exe link: only local sys admin should
2006 * be allowed to.
2007 */
2008 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
2009 return -EINVAL;
2010
2011 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2012 if (error)
2013 return error;
2014 }
2015
2016 /*
2017 	 * arg_lock protects concurrent updates but we still need mmap_sem for
2018 * read to exclude races with sys_brk.
2019 */
2020 down_read(&mm->mmap_sem);
2021
2022 /*
2023 * We don't validate if these members are pointing to
2024 	 * real present VMAs because the application may already have the
2025 	 * corresponding VMAs unmapped; the kernel uses these members mostly
2026 	 * for statistics output in procfs, except
2027 	 *
2028 	 *   - @start_brk/@brk, which are used in do_brk, but the kernel looks
2029 	 *     up VMAs when updating these members, so anything wrong written
2030 	 *     here causes the kernel to swear at the userspace program but
2031 	 *     won't lead to any problem in the kernel itself
2032 */
2033
2034 spin_lock(&mm->arg_lock);
2035 mm->start_code = prctl_map.start_code;
2036 mm->end_code = prctl_map.end_code;
2037 mm->start_data = prctl_map.start_data;
2038 mm->end_data = prctl_map.end_data;
2039 mm->start_brk = prctl_map.start_brk;
2040 mm->brk = prctl_map.brk;
2041 mm->start_stack = prctl_map.start_stack;
2042 mm->arg_start = prctl_map.arg_start;
2043 mm->arg_end = prctl_map.arg_end;
2044 mm->env_start = prctl_map.env_start;
2045 mm->env_end = prctl_map.env_end;
2046 spin_unlock(&mm->arg_lock);
2047
2048 /*
2049 	 * Note this update of @saved_auxv is lockless, thus
2050 	 * if someone reads this member in procfs while we're
2051 	 * updating -- it may get partly updated results. It's
2052 	 * a known and acceptable trade-off: we leave it as is to
2053 * not introduce additional locks here making the kernel
2054 * more complex.
2055 */
2056 if (prctl_map.auxv_size)
2057 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2058
2059 up_read(&mm->mmap_sem);
2060 return 0;
2061 }
2062 #endif /* CONFIG_CHECKPOINT_RESTORE */
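/*
 * For illustration, a checkpoint/restore tool drives the interface above
 * roughly like this (a sketch with an illustrative helper name; requires
 * CAP_SYS_ADMIN and a kernel built with CONFIG_CHECKPOINT_RESTORE, error
 * handling omitted):
 *
 *	#include <sys/prctl.h>		// pulls in <linux/prctl.h>
 *
 *	static int restore_mm_layout(const struct prctl_mm_map *map)
 *	{
 *		return prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)map,
 *			     sizeof(*map), 0);
 *	}
 */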
2063
2064 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2065 unsigned long len)
2066 {
2067 /*
2068 * This doesn't move the auxiliary vector itself since it's pinned to
2069 * mm_struct, but it permits filling the vector with new values. It's
2070 * up to the caller to provide sane values here, otherwise userspace
2071 * tools which use this vector might be unhappy.
2072 */
2073 unsigned long user_auxv[AT_VECTOR_SIZE];
2074
2075 if (len > sizeof(user_auxv))
2076 return -EINVAL;
2077
2078 if (copy_from_user(user_auxv, (const void __user *)addr, len))
2079 return -EFAULT;
2080
2081 /* Make sure the last entry is always AT_NULL */
2082 user_auxv[AT_VECTOR_SIZE - 2] = 0;
2083 user_auxv[AT_VECTOR_SIZE - 1] = 0;
2084
2085 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2086
2087 task_lock(current);
2088 memcpy(mm->saved_auxv, user_auxv, len);
2089 task_unlock(current);
2090
2091 return 0;
2092 }
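
/*
 * A sketch of overwriting the recorded auxv from userspace (a hedged
 * example; this is the PR_SET_MM_AUXV path into prctl_set_auxv() above
 * and needs CAP_SYS_RESOURCE). The vector is (type, value) pairs of
 * unsigned longs terminated by AT_NULL, and is what /proc/self/auxv
 * subsequently exposes:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <elf.h>
 *
 *	unsigned long auxv[4] = {
 *		AT_UID, 1000,	// illustrative entry
 *		AT_NULL, 0,	// terminator (the kernel forces this too)
 *	};
 *
 *	prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv,
 *	      sizeof(auxv), 0);
 */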
2093
2094 static int prctl_set_mm(int opt, unsigned long addr,
2095 unsigned long arg4, unsigned long arg5)
2096 {
2097 struct mm_struct *mm = current->mm;
2098 struct prctl_mm_map prctl_map = {
2099 .auxv = NULL,
2100 .auxv_size = 0,
2101 .exe_fd = -1,
2102 };
2103 struct vm_area_struct *vma;
2104 int error;
2105
2106 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2107 opt != PR_SET_MM_MAP &&
2108 opt != PR_SET_MM_MAP_SIZE)))
2109 return -EINVAL;
2110
2111 #ifdef CONFIG_CHECKPOINT_RESTORE
2112 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2113 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2114 #endif
2115
2116 if (!capable(CAP_SYS_RESOURCE))
2117 return -EPERM;
2118
2119 if (opt == PR_SET_MM_EXE_FILE)
2120 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2121
2122 if (opt == PR_SET_MM_AUXV)
2123 return prctl_set_auxv(mm, addr, arg4);
2124
2125 if (addr >= TASK_SIZE || addr < mmap_min_addr)
2126 return -EINVAL;
2127
2128 error = -EINVAL;
2129
2130 /*
2131 * arg_lock protects concurrent updates of the arg boundaries; we need
2132 * mmap_sem for a) concurrent sys_brk and b) finding the VMA for addr
2133 * validation.
2134 */
2135 down_read(&mm->mmap_sem);
2136 vma = find_vma(mm, addr);
2137
2138 spin_lock(&mm->arg_lock);
2139 prctl_map.start_code = mm->start_code;
2140 prctl_map.end_code = mm->end_code;
2141 prctl_map.start_data = mm->start_data;
2142 prctl_map.end_data = mm->end_data;
2143 prctl_map.start_brk = mm->start_brk;
2144 prctl_map.brk = mm->brk;
2145 prctl_map.start_stack = mm->start_stack;
2146 prctl_map.arg_start = mm->arg_start;
2147 prctl_map.arg_end = mm->arg_end;
2148 prctl_map.env_start = mm->env_start;
2149 prctl_map.env_end = mm->env_end;
2150
2151 switch (opt) {
2152 case PR_SET_MM_START_CODE:
2153 prctl_map.start_code = addr;
2154 break;
2155 case PR_SET_MM_END_CODE:
2156 prctl_map.end_code = addr;
2157 break;
2158 case PR_SET_MM_START_DATA:
2159 prctl_map.start_data = addr;
2160 break;
2161 case PR_SET_MM_END_DATA:
2162 prctl_map.end_data = addr;
2163 break;
2164 case PR_SET_MM_START_STACK:
2165 prctl_map.start_stack = addr;
2166 break;
2167 case PR_SET_MM_START_BRK:
2168 prctl_map.start_brk = addr;
2169 break;
2170 case PR_SET_MM_BRK:
2171 prctl_map.brk = addr;
2172 break;
2173 case PR_SET_MM_ARG_START:
2174 prctl_map.arg_start = addr;
2175 break;
2176 case PR_SET_MM_ARG_END:
2177 prctl_map.arg_end = addr;
2178 break;
2179 case PR_SET_MM_ENV_START:
2180 prctl_map.env_start = addr;
2181 break;
2182 case PR_SET_MM_ENV_END:
2183 prctl_map.env_end = addr;
2184 break;
2185 default:
2186 goto out;
2187 }
2188
2189 error = validate_prctl_map_addr(&prctl_map);
2190 if (error)
2191 goto out;
2192
2193 switch (opt) {
2194 /*
2195 * If the command line arguments and environment
2196 * are placed somewhere else on the stack, we can
2197 * set them up here: ARG_START/END to set up the
2198 * command line arguments and ENV_START/END
2199 * for the environment.
2200 */
2201 case PR_SET_MM_START_STACK:
2202 case PR_SET_MM_ARG_START:
2203 case PR_SET_MM_ARG_END:
2204 case PR_SET_MM_ENV_START:
2205 case PR_SET_MM_ENV_END:
2206 if (!vma) {
2207 error = -EFAULT;
2208 goto out;
2209 }
2210 }
2211
2212 mm->start_code = prctl_map.start_code;
2213 mm->end_code = prctl_map.end_code;
2214 mm->start_data = prctl_map.start_data;
2215 mm->end_data = prctl_map.end_data;
2216 mm->start_brk = prctl_map.start_brk;
2217 mm->brk = prctl_map.brk;
2218 mm->start_stack = prctl_map.start_stack;
2219 mm->arg_start = prctl_map.arg_start;
2220 mm->arg_end = prctl_map.arg_end;
2221 mm->env_start = prctl_map.env_start;
2222 mm->env_end = prctl_map.env_end;
2223
2224 error = 0;
2225 out:
2226 spin_unlock(&mm->arg_lock);
2227 up_read(&mm->mmap_sem);
2228 return error;
2229 }
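
/*
 * A sketch of the classic "setproctitle" use of the
 * PR_SET_MM_ARG_START/PR_SET_MM_ARG_END pair handled above, which
 * repoints what /proc/self/cmdline shows (a hedged example: it needs
 * CAP_SYS_RESOURCE, the buffer must lie in a mapped VMA, and the two
 * calls may need to be issued in the opposite order when the new
 * buffer sits below the old range, since validation insists that
 * arg_start <= arg_end at every step):
 *
 *	#include <sys/prctl.h>
 *	#include <string.h>
 *
 *	static char title_buf[256];
 *
 *	static int set_proc_title(const char *title)
 *	{
 *		size_t len = strnlen(title, sizeof(title_buf) - 1);
 *
 *		memcpy(title_buf, title, len);
 *		title_buf[len] = '\0';
 *		if (prctl(PR_SET_MM, PR_SET_MM_ARG_END,
 *			  (unsigned long)title_buf + len + 1, 0, 0))
 *			return -1;
 *		return prctl(PR_SET_MM, PR_SET_MM_ARG_START,
 *			     (unsigned long)title_buf, 0, 0);
 *	}
 */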
2230
2231 #ifdef CONFIG_CHECKPOINT_RESTORE
2232 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2233 {
2234 return put_user(me->clear_child_tid, tid_addr);
2235 }
2236 #else
2237 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2238 {
2239 return -EINVAL;
2240 }
2241 #endif
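
/*
 * A sketch of recovering the address registered through
 * set_tid_address(2) or CLONE_CHILD_CLEARTID (a hedged example; it
 * returns -EINVAL on kernels without CONFIG_CHECKPOINT_RESTORE, per
 * the stub above):
 *
 *	#include <sys/prctl.h>
 *	#include <stdio.h>
 *
 *	int *tid_addr = NULL;
 *
 *	if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0) == 0)
 *		printf("clear_child_tid at %p\n", (void *)tid_addr);
 */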
2242
2243 static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2244 {
2245 /*
2246 * If the task has has_child_subreaper set, all its descendants
2247 * already have this flag too and new descendants will
2248 * inherit it on fork, so skip them.
2249 *
2250 * If we've found the child_reaper, skip descendants in
2251 * its subtree as they will never get out of this pidns.
2252 */
2253 if (p->signal->has_child_subreaper ||
2254 is_child_reaper(task_pid(p)))
2255 return 0;
2256
2257 p->signal->has_child_subreaper = 1;
2258 return 1;
2259 }
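
/*
 * A sketch of how a service manager uses the subreaper machinery that
 * propagate_has_child_subreaper() supports (a hedged example): after
 * this call, orphaned descendants reparent to the caller instead of
 * init, so it can wait() on double-forked daemons:
 *
 *	#include <sys/prctl.h>
 *	#include <stdio.h>
 *
 *	if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0))
 *		perror("PR_SET_CHILD_SUBREAPER");
 */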
2260
2261 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2262 {
2263 return -EINVAL;
2264 }
2265
2266 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2267 unsigned long ctrl)
2268 {
2269 return -EINVAL;
2270 }
2271
2272 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2273 unsigned long, arg4, unsigned long, arg5)
2274 {
2275 struct task_struct *me = current;
2276 unsigned char comm[sizeof(me->comm)];
2277 long error;
2278
2279 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2280 if (error != -ENOSYS)
2281 return error;
2282
2283 error = 0;
2284 switch (option) {
2285 case PR_SET_PDEATHSIG:
2286 if (!valid_signal(arg2)) {
2287 error = -EINVAL;
2288 break;
2289 }
2290 me->pdeath_signal = arg2;
2291 break;
2292 case PR_GET_PDEATHSIG:
2293 error = put_user(me->pdeath_signal, (int __user *)arg2);
2294 break;
2295 case PR_GET_DUMPABLE:
2296 error = get_dumpable(me->mm);
2297 break;
2298 case PR_SET_DUMPABLE:
2299 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2300 error = -EINVAL;
2301 break;
2302 }
2303 set_dumpable(me->mm, arg2);
2304 break;
2305
2306 case PR_SET_UNALIGN:
2307 error = SET_UNALIGN_CTL(me, arg2);
2308 break;
2309 case PR_GET_UNALIGN:
2310 error = GET_UNALIGN_CTL(me, arg2);
2311 break;
2312 case PR_SET_FPEMU:
2313 error = SET_FPEMU_CTL(me, arg2);
2314 break;
2315 case PR_GET_FPEMU:
2316 error = GET_FPEMU_CTL(me, arg2);
2317 break;
2318 case PR_SET_FPEXC:
2319 error = SET_FPEXC_CTL(me, arg2);
2320 break;
2321 case PR_GET_FPEXC:
2322 error = GET_FPEXC_CTL(me, arg2);
2323 break;
2324 case PR_GET_TIMING:
2325 error = PR_TIMING_STATISTICAL;
2326 break;
2327 case PR_SET_TIMING:
2328 if (arg2 != PR_TIMING_STATISTICAL)
2329 error = -EINVAL;
2330 break;
2331 case PR_SET_NAME:
2332 comm[sizeof(me->comm) - 1] = 0;
2333 if (strncpy_from_user(comm, (char __user *)arg2,
2334 sizeof(me->comm) - 1) < 0)
2335 return -EFAULT;
2336 set_task_comm(me, comm);
2337 proc_comm_connector(me);
2338 break;
2339 case PR_GET_NAME:
2340 get_task_comm(comm, me);
2341 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2342 return -EFAULT;
2343 break;
2344 case PR_GET_ENDIAN:
2345 error = GET_ENDIAN(me, arg2);
2346 break;
2347 case PR_SET_ENDIAN:
2348 error = SET_ENDIAN(me, arg2);
2349 break;
2350 case PR_GET_SECCOMP:
2351 error = prctl_get_seccomp();
2352 break;
2353 case PR_SET_SECCOMP:
2354 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2355 break;
2356 case PR_GET_TSC:
2357 error = GET_TSC_CTL(arg2);
2358 break;
2359 case PR_SET_TSC:
2360 error = SET_TSC_CTL(arg2);
2361 break;
2362 case PR_TASK_PERF_EVENTS_DISABLE:
2363 error = perf_event_task_disable();
2364 break;
2365 case PR_TASK_PERF_EVENTS_ENABLE:
2366 error = perf_event_task_enable();
2367 break;
2368 case PR_GET_TIMERSLACK:
2369 if (current->timer_slack_ns > ULONG_MAX)
2370 error = ULONG_MAX;
2371 else
2372 error = current->timer_slack_ns;
2373 break;
2374 case PR_SET_TIMERSLACK:
2375 if (arg2 <= 0)
2376 current->timer_slack_ns =
2377 current->default_timer_slack_ns;
2378 else
2379 current->timer_slack_ns = arg2;
2380 break;
2381 case PR_MCE_KILL:
2382 if (arg4 | arg5)
2383 return -EINVAL;
2384 switch (arg2) {
2385 case PR_MCE_KILL_CLEAR:
2386 if (arg3 != 0)
2387 return -EINVAL;
2388 current->flags &= ~PF_MCE_PROCESS;
2389 break;
2390 case PR_MCE_KILL_SET:
2391 current->flags |= PF_MCE_PROCESS;
2392 if (arg3 == PR_MCE_KILL_EARLY)
2393 current->flags |= PF_MCE_EARLY;
2394 else if (arg3 == PR_MCE_KILL_LATE)
2395 current->flags &= ~PF_MCE_EARLY;
2396 else if (arg3 == PR_MCE_KILL_DEFAULT)
2397 current->flags &=
2398 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2399 else
2400 return -EINVAL;
2401 break;
2402 default:
2403 return -EINVAL;
2404 }
2405 break;
2406 case PR_MCE_KILL_GET:
2407 if (arg2 | arg3 | arg4 | arg5)
2408 return -EINVAL;
2409 if (current->flags & PF_MCE_PROCESS)
2410 error = (current->flags & PF_MCE_EARLY) ?
2411 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2412 else
2413 error = PR_MCE_KILL_DEFAULT;
2414 break;
2415 case PR_SET_MM:
2416 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2417 break;
2418 case PR_GET_TID_ADDRESS:
2419 error = prctl_get_tid_address(me, (int __user **)arg2);
2420 break;
2421 case PR_SET_CHILD_SUBREAPER:
2422 me->signal->is_child_subreaper = !!arg2;
2423 if (!arg2)
2424 break;
2425
2426 walk_process_tree(me, propagate_has_child_subreaper, NULL);
2427 break;
2428 case PR_GET_CHILD_SUBREAPER:
2429 error = put_user(me->signal->is_child_subreaper,
2430 (int __user *)arg2);
2431 break;
2432 case PR_SET_NO_NEW_PRIVS:
2433 if (arg2 != 1 || arg3 || arg4 || arg5)
2434 return -EINVAL;
2435
2436 task_set_no_new_privs(current);
2437 break;
2438 case PR_GET_NO_NEW_PRIVS:
2439 if (arg2 || arg3 || arg4 || arg5)
2440 return -EINVAL;
2441 return task_no_new_privs(current) ? 1 : 0;
2442 case PR_GET_THP_DISABLE:
2443 if (arg2 || arg3 || arg4 || arg5)
2444 return -EINVAL;
2445 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2446 break;
2447 case PR_SET_THP_DISABLE:
2448 if (arg3 || arg4 || arg5)
2449 return -EINVAL;
2450 if (down_write_killable(&me->mm->mmap_sem))
2451 return -EINTR;
2452 if (arg2)
2453 set_bit(MMF_DISABLE_THP, &me->mm->flags);
2454 else
2455 clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2456 up_write(&me->mm->mmap_sem);
2457 break;
2458 case PR_MPX_ENABLE_MANAGEMENT:
2459 case PR_MPX_DISABLE_MANAGEMENT:
2460 /* No longer implemented: */
2461 return -EINVAL;
2462 case PR_SET_FP_MODE:
2463 error = SET_FP_MODE(me, arg2);
2464 break;
2465 case PR_GET_FP_MODE:
2466 error = GET_FP_MODE(me);
2467 break;
2468 case PR_SVE_SET_VL:
2469 error = SVE_SET_VL(arg2);
2470 break;
2471 case PR_SVE_GET_VL:
2472 error = SVE_GET_VL();
2473 break;
2474 case PR_GET_SPECULATION_CTRL:
2475 if (arg3 || arg4 || arg5)
2476 return -EINVAL;
2477 error = arch_prctl_spec_ctrl_get(me, arg2);
2478 break;
2479 case PR_SET_SPECULATION_CTRL:
2480 if (arg4 || arg5)
2481 return -EINVAL;
2482 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2483 break;
2484 case PR_PAC_RESET_KEYS:
2485 if (arg3 || arg4 || arg5)
2486 return -EINVAL;
2487 error = PAC_RESET_KEYS(me, arg2);
2488 break;
2489 case PR_SET_TAGGED_ADDR_CTRL:
2490 if (arg3 || arg4 || arg5)
2491 return -EINVAL;
2492 error = SET_TAGGED_ADDR_CTRL(arg2);
2493 break;
2494 case PR_GET_TAGGED_ADDR_CTRL:
2495 if (arg2 || arg3 || arg4 || arg5)
2496 return -EINVAL;
2497 error = GET_TAGGED_ADDR_CTRL();
2498 break;
2499 default:
2500 error = -EINVAL;
2501 break;
2502 }
2503 return error;
2504 }
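
/*
 * A sketch of the PR_SET_NAME/PR_GET_NAME pair handled above (a hedged
 * example): the name is the 16-byte task comm, so anything longer is
 * silently truncated to 15 characters plus the NUL terminator:
 *
 *	#include <sys/prctl.h>
 *	#include <stdio.h>
 *
 *	char comm[16];
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker-thread-long-name", 0, 0, 0);
 *	prctl(PR_GET_NAME, (unsigned long)comm, 0, 0, 0);
 *	printf("%s\n", comm);	// prints "worker-thread-l"
 */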
2505
2506 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2507 struct getcpu_cache __user *, unused)
2508 {
2509 int err = 0;
2510 int cpu = raw_smp_processor_id();
2511
2512 if (cpup)
2513 err |= put_user(cpu, cpup);
2514 if (nodep)
2515 err |= put_user(cpu_to_node(cpu), nodep);
2516 return err ? -EFAULT : 0;
2517 }
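
/*
 * A sketch of calling getcpu directly (a hedged example): glibc's
 * sched_getcpu() is the usual wrapper, often served from the vDSO
 * without entering the kernel; the third argument is the unused
 * getcpu_cache and may be NULL:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned int cpu, node;
 *
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("cpu %u on node %u\n", cpu, node);
 */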
2518
2519 /**
2520 * do_sysinfo - fill in sysinfo struct
2521 * @info: pointer to buffer to fill
2522 */
2523 static int do_sysinfo(struct sysinfo *info)
2524 {
2525 unsigned long mem_total, sav_total;
2526 unsigned int mem_unit, bitcount;
2527 struct timespec64 tp;
2528
2529 memset(info, 0, sizeof(struct sysinfo));
2530
2531 ktime_get_boottime_ts64(&tp);
2532 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2533
2534 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2535
2536 info->procs = nr_threads;
2537
2538 si_meminfo(info);
2539 si_swapinfo(info);
2540
2541 /*
2542 * If the sum of all the available memory (i.e. ram + swap)
2543 * is less than what can be stored in a 32-bit unsigned long, then
2544 * we can be binary compatible with 2.2.x kernels. If not,
2545 * well, in that case 2.2.x was broken anyway...
2546 *
2547 * -Erik Andersen <andersee@debian.org>
2548 */
2549
2550 mem_total = info->totalram + info->totalswap;
2551 if (mem_total < info->totalram || mem_total < info->totalswap)
2552 goto out;
2553 bitcount = 0;
2554 mem_unit = info->mem_unit;
2555 while (mem_unit > 1) {
2556 bitcount++;
2557 mem_unit >>= 1;
2558 sav_total = mem_total;
2559 mem_total <<= 1;
2560 if (mem_total < sav_total)
2561 goto out;
2562 }
2563
2564 /*
2565 * If mem_total did not overflow, multiply all memory values by
2566 * info->mem_unit and set it to 1. This leaves things compatible
2567 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2568 * kernels...
2569 */
2570
2571 info->mem_unit = 1;
2572 info->totalram <<= bitcount;
2573 info->freeram <<= bitcount;
2574 info->sharedram <<= bitcount;
2575 info->bufferram <<= bitcount;
2576 info->totalswap <<= bitcount;
2577 info->freeswap <<= bitcount;
2578 info->totalhigh <<= bitcount;
2579 info->freehigh <<= bitcount;
2580
2581 out:
2582 return 0;
2583 }
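
/*
 * Worked example of the scaling above (hedged, with illustrative
 * numbers): on a 32-bit kernel with 2 GiB of RAM and no swap,
 * si_meminfo() yields totalram = 524288 pages and mem_unit = 4096.
 * The loop shifts mem_total left 12 times (4096 == 1 << 12) without
 * overflow, so the caller sees mem_unit = 1 and
 * totalram = 524288 << 12 = 2147483648 bytes. Had the machine held
 * 8 GiB, the shift would overflow the 32-bit unsigned long and we
 * would bail out early, leaving mem_unit at 4096.
 */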
2584
2585 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2586 {
2587 struct sysinfo val;
2588
2589 do_sysinfo(&val);
2590
2591 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2592 return -EFAULT;
2593
2594 return 0;
2595 }
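
/*
 * A sketch of the userspace side (a hedged example): total RAM in
 * bytes is totalram scaled by mem_unit, which keeps large machines
 * representable even where unsigned long is 32 bits:
 *
 *	#include <sys/sysinfo.h>
 *	#include <stdio.h>
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0)
 *		printf("up %lds, ram %llu bytes\n", si.uptime,
 *		       (unsigned long long)si.totalram * si.mem_unit);
 */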
2596
2597 #ifdef CONFIG_COMPAT
2598 struct compat_sysinfo {
2599 s32 uptime;
2600 u32 loads[3];
2601 u32 totalram;
2602 u32 freeram;
2603 u32 sharedram;
2604 u32 bufferram;
2605 u32 totalswap;
2606 u32 freeswap;
2607 u16 procs;
2608 u16 pad;
2609 u32 totalhigh;
2610 u32 freehigh;
2611 u32 mem_unit;
2612 char _f[20-2*sizeof(u32)-sizeof(int)];
2613 };
2614
2615 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2616 {
2617 struct sysinfo s;
2618
2619 do_sysinfo(&s);
2620
2621 /* Check to see if any memory value is too large for 32 bits and
2622 * scale down if needed.
2623 */
2624 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2625 int bitcount = 0;
2626
2627 while (s.mem_unit < PAGE_SIZE) {
2628 s.mem_unit <<= 1;
2629 bitcount++;
2630 }
2631
2632 s.totalram >>= bitcount;
2633 s.freeram >>= bitcount;
2634 s.sharedram >>= bitcount;
2635 s.bufferram >>= bitcount;
2636 s.totalswap >>= bitcount;
2637 s.freeswap >>= bitcount;
2638 s.totalhigh >>= bitcount;
2639 s.freehigh >>= bitcount;
2640 }
2641
2642 if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2643 __put_user(s.uptime, &info->uptime) ||
2644 __put_user(s.loads[0], &info->loads[0]) ||
2645 __put_user(s.loads[1], &info->loads[1]) ||
2646 __put_user(s.loads[2], &info->loads[2]) ||
2647 __put_user(s.totalram, &info->totalram) ||
2648 __put_user(s.freeram, &info->freeram) ||
2649 __put_user(s.sharedram, &info->sharedram) ||
2650 __put_user(s.bufferram, &info->bufferram) ||
2651 __put_user(s.totalswap, &info->totalswap) ||
2652 __put_user(s.freeswap, &info->freeswap) ||
2653 __put_user(s.procs, &info->procs) ||
2654 __put_user(s.totalhigh, &info->totalhigh) ||
2655 __put_user(s.freehigh, &info->freehigh) ||
2656 __put_user(s.mem_unit, &info->mem_unit))
2657 return -EFAULT;
2658
2659 return 0;
2660 }
2661 #endif /* CONFIG_COMPAT */
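
/*
 * Worked example of the compat scaling above (hedged, illustrative
 * numbers): a 64-bit kernel with 8 GiB of RAM reports totalram =
 * 2^33 bytes with mem_unit = 1, which does not fit the compat u32
 * fields. The loop raises mem_unit to PAGE_SIZE (4096, i.e.
 * bitcount = 12) and shifts the counters down, so the 32-bit caller
 * sees totalram = 2^21 units of 4096 bytes, which fits.
 */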