/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */
int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */
void (*pm_power_off_prepare)(void);
/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
	int no_nice;

	if (pcred->uid  != cred->euid &&
	    pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (__task_cred(p)->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != cred->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (__task_cred(p)->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != cred->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
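/*
 * Illustrative userspace view (editorial note, not from this file): with
 * the offset encoding above, a task at nice -20 makes this syscall return
 * 40 and a task at nice +19 return 1; the C library subtracts 20 again so
 * getpriority(2) callers still see the familiar -20..19 range.
 */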
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
}
/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		ret = -EINVAL;
		break;
	}
	unlock_kernel();
	return ret;
}
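/*
 * Illustrative raw call (editorial note, not from this file): a reboot
 * request must present LINUX_REBOOT_MAGIC1 plus one of the accepted
 * second magics, e.g.
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 *
 * anything else fails the magic check above with -EINVAL before any
 * state is touched.
 */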
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	struct cred *cred = current->cred;
	int old_rgid = cred->gid;
	int old_egid = cred->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (cred->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (cred->egid == egid) ||
		    (cred->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		cred->sgid = new_egid;
	cred->fsgid = new_egid;
	cred->egid = new_egid;
	cred->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
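/*
 * Illustrative sequence (editorial note, not from this file): a setgid
 * program can drop its group privilege permanently with
 *
 *	setregid(getgid(), getgid());
 *
 * because setting the real gid forces the saved gid to the new effective
 * gid under the rules above, leaving no elevated gid to switch back to.
 */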
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	struct cred *cred = current->cred;
	int old_egid = cred->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->gid = cred->egid = cred->sgid = cred->fsgid = gid;
	} else if ((gid == cred->gid) || (gid == cred->sgid)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->egid = cred->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->cred->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	struct cred *cred = current->cred;
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = cred->uid;
	new_euid = old_euid = cred->euid;
	old_suid = cred->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (cred->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (cred->euid != euid) &&
		    (cred->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	cred->fsuid = cred->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		cred->suid = cred->euid;
	cred->fsuid = cred->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	struct cred *cred = current->cred;
	int old_euid = cred->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = cred->uid;
	old_suid = cred->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != cred->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	cred->fsuid = cred->euid = uid;
	cred->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
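/*
 * Illustrative contrast (editorial note, not from this file): a setuid-root
 * program that only wants to drop privilege temporarily should not call
 * setuid(), which would also overwrite the saved uid; the BSD-style swap
 * keeps a way back:
 *
 *	setreuid(geteuid(), getuid());	(swap real and effective uid)
 *	... do unprivileged work ...
 *	setreuid(geteuid(), getuid());	(swap back to regain privilege)
 */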
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct cred *cred = current->cred;
	int old_ruid = cred->uid;
	int old_euid = cred->euid;
	int old_suid = cred->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != cred->uid) &&
		    (ruid != cred->euid) && (ruid != cred->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != cred->uid) &&
		    (euid != cred->euid) && (euid != cred->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != cred->uid) &&
		    (suid != cred->euid) && (suid != cred->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != cred->uid &&
		    set_user(ruid, euid != cred->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != cred->euid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->euid = euid;
	}
	cred->fsuid = cred->euid;
	if (suid != (uid_t) -1)
		cred->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(cred->uid,  ruid)) &&
	    !(retval = put_user(cred->euid, euid)))
		retval = put_user(cred->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct cred *cred = current->cred;
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != cred->gid) &&
		    (rgid != cred->egid) && (rgid != cred->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != cred->gid) &&
		    (egid != cred->egid) && (egid != cred->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != cred->gid) &&
		    (sgid != cred->egid) && (sgid != cred->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != cred->egid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->egid = egid;
	}
	cred->fsgid = cred->egid;
	if (rgid != (gid_t) -1)
		cred->gid = rgid;
	if (sgid != (gid_t) -1)
		cred->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(cred->gid,  rgid)) &&
	    !(retval = put_user(cred->egid, egid)))
		retval = put_user(cred->sgid, sgid);

	return retval;
}
816 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
817 * is used for "access()" and for the NFS daemon (letting nfsd stay at
818 * whatever uid it wants to). It normally shadows "euid", except when
819 * explicitly set by setfsuid() or for access..
821 asmlinkage
long sys_setfsuid(uid_t uid
)
823 struct cred
*cred
= current
->cred
;
826 old_fsuid
= cred
->fsuid
;
827 if (security_task_setuid(uid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
))
830 if (uid
== cred
->uid
|| uid
== cred
->euid
||
831 uid
== cred
->suid
|| uid
== cred
->fsuid
||
832 capable(CAP_SETUID
)) {
833 if (uid
!= old_fsuid
) {
834 set_dumpable(current
->mm
, suid_dumpable
);
840 key_fsuid_changed(current
);
841 proc_id_connector(current
, PROC_EVENT_UID
);
843 security_task_post_setuid(old_fsuid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
);
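/*
 * Illustrative use (editorial note, not from this file; client_uid is a
 * hypothetical variable): a file server such as nfsd can impersonate a
 * client for permission checks only, without becoming signalable by that
 * user:
 *
 *	old = setfsuid(client_uid);
 *	... open/read/write on the client's behalf ...
 *	setfsuid(old);
 */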
/*
 * Samma på svenska.. ("same thing, in Swedish" - i.e. setfsgid is the
 * gid counterpart of setfsuid above)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	struct cred *cred = current->cred;
	int old_fsgid;

	old_fsgid = cred->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == cred->gid || gid == cred->egid ||
	    gid == cred->sgid || gid == cred->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}

	return old_fsgid;
}
void do_sys_times(struct tms *tms)
{
	struct task_cputime cputime;
	cputime_t cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime(current, &cputime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(cputime.utime);
	tms->tms_stime = cputime_to_clock_t(cputime.stime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
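/*
 * Illustrative conversion (editorial note, not from this file): the tms
 * fields and the return value are in clock ticks, so userspace divides by
 * sysconf(_SC_CLK_TCK):
 *
 *	struct tms t;
 *	clock_t now = times(&t);
 *	double user_secs = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */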
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	return sys_getpgid(0);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
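/*
 * Illustrative sequence (editorial note, not from this file): the classic
 * daemonization dance exists because of the first failure case above;
 * forking first guarantees the child is not a process group leader, so
 * its setsid() succeeds and detaches it from the controlling tty:
 *
 *	if (fork() > 0)
 *		exit(0);
 *	setsid();
 */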
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist, len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
/* a simple bsearch */
int groups_search(const struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/**
 * set_groups - Change a group subscription in a set of credentials
 * @cred: The credentials to alter
 * @group_info: The group list to impose
 *
 * Validate a group subscription and, if valid, impose it upon a task's
 * credentials.
 */
int set_groups(struct cred *cred, struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	spin_lock(&cred->lock);
	old_info = cred->group_info;
	cred->group_info = group_info;
	spin_unlock(&cred->lock);

	put_group_info(old_info);
	return 0;
}

EXPORT_SYMBOL(set_groups);
/**
 * set_current_groups - Change current's group subscription
 * @group_info: The group list to impose
 *
 * Validate a group subscription and, if valid, impose it upon current's task
 * credentials.
 */
int set_current_groups(struct group_info *group_info)
{
	return set_groups(current->cred, group_info);
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	const struct cred *cred = current_cred();
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = cred->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, cred->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	const struct cred *cred = current_cred();
	int retval = 1;

	if (grp != cred->fsgid)
		retval = groups_search(cred->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	const struct cred *cred = current_cred();
	int retval = 1;

	if (grp != cred->egid)
		retval = groups_search(cred->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
long sys_sethostname(char __user
*name
, int len
)
1371 char tmp
[__NEW_UTS_LEN
];
1373 if (!capable(CAP_SYS_ADMIN
))
1375 if (len
< 0 || len
> __NEW_UTS_LEN
)
1377 down_write(&uts_sem
);
1379 if (!copy_from_user(tmp
, name
, len
)) {
1380 struct new_utsname
*u
= utsname();
1382 memcpy(u
->nodename
, tmp
, len
);
1383 memset(u
->nodename
+ len
, 0, sizeof(u
->nodename
) - len
);
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (resource == RLIMIT_NOFILE) {
		if (new_rlim.rlim_max == RLIM_INFINITY)
			new_rlim.rlim_max = sysctl_nr_open;
		if (new_rlim.rlim_cur == RLIM_INFINITY)
			new_rlim.rlim_cur = sysctl_nr_open;
		if (new_rlim.rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	update_rlimit_cpu(new_rlim.rlim_cur);
out:
	return 0;
}
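/*
 * Illustrative call (editorial note, not from this file): raising the
 * soft limit up to the current hard limit needs no capability,
 *
 *	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	setrlimit(RLIMIT_NOFILE, &rl);
 *
 * while raising rlim_max above its old value fails the CAP_SYS_RESOURCE
 * check near the top with -EPERM.
 */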
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	struct task_cputime cputime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		accumulate_thread_rusage(p, r);
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime(p, &cputime);
		utime = cputime_add(utime, cputime.utime);
		stime = cputime_add(stime, cputime.stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
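/*
 * Illustrative use (editorial note, not from this file): umask() always
 * succeeds and returns the previous mask, hence the save/restore idiom
 *
 *	mode_t old = umask(022);
 *	... create files with group/other write cleared ...
 *	umask(old);
 */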
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error = 0;

	if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
		return error;

	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;

	case PR_SET_NAME:
		comm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		return 0;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
				 sizeof(comm)))
			return -EFAULT;
		return 0;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;

	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}
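/*
 * Illustrative calls (editorial note, not from this file):
 *
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);  deliver SIGTERM when parent dies
 *	prctl(PR_SET_NAME, "worker");      set comm (15 chars + NUL)
 *	prctl(PR_GET_TIMERSLACK);          returns timer slack in ns
 */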
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
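/*
 * Illustrative use (editorial note, not from this file): either output
 * pointer may be NULL, so a caller interested only in the CPU can do
 *
 *	unsigned cpu;
 *	syscall(SYS_getcpu, &cpu, NULL, NULL);
 *
 * The answer is advisory: the task may migrate as soon as the call
 * returns, which is why no locking is done here.
 */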
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)
{
	argv_free(argv);
}
/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
	if (info == NULL) {
		argv_free(argv);
		goto out;
	}

	call_usermodehelper_setcleanup(info, argv_cleanup);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap.  Or not even bother syncing
		   if we're doing an emergency shutdown? */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);