/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
static int set_one_prio(struct task_struct *p, int niceval, int error)
	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
	no_nice = security_task_setnice(p, niceval);
	set_user_nice(p, niceval);

asmlinkage long sys_setpriority(int which, int who, int niceval)
	struct task_struct *g, *p;
	struct user_struct *user;

	if (which > PRIO_USER || which < PRIO_PROCESS)

	/* normalize: avoid signed division (rounding problems) */

	read_lock(&tasklist_lock);
			p = find_task_by_pid_ns(who, current->nsproxy->pid_ns);
			error = set_one_prio(p, niceval, error);
			pgrp = find_vpid(who);
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		user = current->user;
		if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */
			error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
	read_unlock(&tasklist_lock);

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
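/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of undoing the 20-offset described above when the raw syscall
 * is used directly (the glibc wrapper already converts the value back).
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice_val = 20 - (int)raw;	// e.g. a raw value of 25 means nice -5
 */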
asmlinkage long sys_getpriority(int which, int who)
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > PRIO_USER || which < PRIO_PROCESS)

	read_lock(&tasklist_lock);
			p = find_task_by_pid_ns(who, current->nsproxy->pid_ns);
			niceval = 20 - task_nice(p);
			if (niceval > retval)
			pgrp = find_vpid(who);
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		user = current->user;
		if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */
			niceval = 20 - task_nice(p);
			if (niceval > retval)
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
	read_unlock(&tasklist_lock);

/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
	machine_emergency_restart();
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
	kernel_restart_prepare(cmd);
		printk(KERN_EMERG "Restarting system.\n");
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_kexec(image);

void kernel_shutdown_prepare(enum system_states state)
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;

/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	printk(KERN_EMERG "Power down.\n");
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
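/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of the magic-number convention described above.  The glibc
 * reboot() wrapper supplies LINUX_REBOOT_MAGIC1/MAGIC2 itself, so only
 * the command constant needs to be passed.
 *
 *	#include <unistd.h>
 *	#include <sys/reboot.h>
 *
 *	sync();			// the syscall itself does not sync
 *	reboot(RB_POWER_OFF);	// requires CAP_SYS_BOOT
 */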
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
		void __user *arg)
	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
	case LINUX_REBOOT_CMD_CAD_ON:
	case LINUX_REBOOT_CMD_CAD_OFF:
	case LINUX_REBOOT_CMD_HALT:
	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);
	case LINUX_REBOOT_CMD_KEXEC:
#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		int ret = hibernate();

static void deferred_cad(struct work_struct *dummy)
	kernel_restart(NULL);

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
	static DECLARE_WORK(cad_work, deferred_cad);

		schedule_work(&cad_work);
		kill_cad_pid(SIGINT, 1);

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
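/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of the "setgid program drops its privileges" pattern described
 * above; drop_group_privs() is a hypothetical helper name.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	static void drop_group_privs(void)
 *	{
 *		gid_t rgid = getgid();
 *		// setting the real gid also resets the saved gid to the
 *		// new effective gid, so the old group cannot be regained
 *		if (setregid(rgid, rgid) != 0)
 *			abort();
 *	}
 */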
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
	if (new_egid != old_egid) {
		set_dumpable(current->mm, suid_dumpable);
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
	int old_egid = current->egid;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
		current->egid = current->fsgid = gid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

static int set_user(uid_t new_ruid, int dumpclear)
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != current->nsproxy->user_ns->root_user) {
	switch_uid(new_user);
		set_dumpable(current->mm, suid_dumpable);
	current->uid = new_ruid;

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
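/*
 * Illustration (not part of the original file): the analogous uid-side
 * pattern, a hedged sketch of a setuid program permanently dropping its
 * privileges per the rules above; drop_user_privs() is a hypothetical
 * helper name.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	static void drop_user_privs(void)
 *	{
 *		uid_t ruid = getuid();
 *		if (setreuid(ruid, ruid) != 0)	// saved uid becomes ruid too
 *			abort();
 *	}
 */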
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
	if (euid != (uid_t) -1) {
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)

	if (new_euid != old_euid) {
		set_dumpable(current->mm, suid_dumpable);
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
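/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of the BSD-style swap mentioned above, letting a setuid-root
 * program temporarily drop privileges and regain them later.
 *
 *	#include <unistd.h>
 *
 *	// initial state of a setuid-root binary: ruid = user, euid = root
 *	setreuid(geteuid(), getuid());	// park root in the real uid
 *	// ... do work as the invoking user ...
 *	setreuid(geteuid(), getuid());	// swap back, euid is root again
 */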
asmlinkage long sys_setuid(uid_t uid)
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);

	old_ruid = current->uid;
	old_suid = current->suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
	} else if ((uid != current->uid) && (uid != new_suid))

	if (old_euid != uid) {
		set_dumpable(current->mm, suid_dumpable);
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
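/*
 * Illustration (not part of the original file): the 4.4BSD-compatible
 * seteuid() mentioned above can be expressed through setresuid() by
 * leaving the real and saved uids untouched.  A hedged sketch, with a
 * hypothetical wrapper name:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	static int my_seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t)-1, euid, (uid_t)-1);
 *	}
 */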
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			set_dumpable(current->mm, suid_dumpable);
		current->euid = euid;
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			set_dumpable(current->mm, suid_dumpable);
		current->egid = egid;
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
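/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of the nfsd-style use described above, doing filesystem
 * permission checks as a client uid without changing euid; serve_as()
 * is a hypothetical helper name.
 *
 *	#include <sys/fsuid.h>
 *
 *	static void serve_as(uid_t client_uid)
 *	{
 *		int old = setfsuid(client_uid);	// returns the previous fsuid
 *		// ... open()/access() now check permissions as client_uid ...
 *		setfsuid(old);			// restore
 *	}
 */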
asmlinkage long sys_setfsuid(uid_t uid)
	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			set_dumpable(current->mm, suid_dumpable);
		current->fsuid = uid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
/*
 * Samma på svenska.. ("the same thing, in Swedish")
 */
asmlinkage long sys_setfsgid(gid_t gid)
	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
asmlinkage long sys_times(struct tms __user *tbuf)
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
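/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of consuming the clock_t values reported here, scaled by
 * sysconf(_SC_CLK_TCK).
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	struct tms t;
 *	times(&t);	// return value is ticks since an arbitrary point
 *	long hz = sysconf(_SC_CLK_TCK);
 *	printf("user %.2fs sys %.2fs\n",
 *	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
 */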
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid_namespace *ns;

		pid = task_pid_vnr(group_leader);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	ns = current->nsproxy->pid_ns;

	write_lock_irq(&tasklist_lock);

	p = find_task_by_pid_ns(pid, ns);

	if (!thread_group_leader(p))

	if (p->real_parent->tgid == group_leader->tgid) {
		if (task_session(p) != task_session(group_leader))
		if (p != group_leader)

	if (p->signal->leader)

		struct task_struct *g;

		g = find_task_by_pid_type_ns(PIDTYPE_PGID, pgid, ns);
		if (!g || task_session(g) != task_session(group_leader))

	err = security_task_setpgid(p, pgid);

	if (task_pgrp_nr_ns(p, ns) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		pid = find_vpid(pgid);
		attach_pid(p, PIDTYPE_PGID, pid);
		p->signal->pgrp = pid_nr(pid);

	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);

asmlinkage long sys_getpgid(pid_t pid)
		return task_pgrp_vnr(current);
		struct task_struct *p;
		struct pid_namespace *ns;

		ns = current->nsproxy->pid_ns;

		read_lock(&tasklist_lock);
		p = find_task_by_pid_ns(pid, ns);
			retval = security_task_getpgid(p);
				retval = task_pgrp_nr_ns(p, ns);
		read_unlock(&tasklist_lock);

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
	/* SMP - assuming writes are word atomic this is fine */
	return task_pgrp_vnr(current);

asmlinkage long sys_getsid(pid_t pid)
		return task_session_vnr(current);
		struct task_struct *p;
		struct pid_namespace *ns;

		ns = current->nsproxy->pid_ns;

		read_lock(&tasklist_lock);
		p = find_task_by_pid_ns(pid, ns);
			retval = security_task_getsid(p);
				retval = task_session_nr_ns(p, ns);
		read_unlock(&tasklist_lock);

asmlinkage long sys_setsid(void)
	struct task_struct *group_leader = current->group_leader;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = task_pgrp_vnr(group_leader);

	write_unlock_irq(&tasklist_lock);

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
	struct group_info *group_info;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
		for (i = 0; i < nblocks; i++) {
			b = (void *)__get_free_page(GFP_USER);
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;

out_undo_partial_alloc:
		free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
	if (group_info->blocks[0] != group_info->small_block) {
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)

		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
			GROUP_AT(group_info, right) = tmp;
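/*
 * Illustration (not part of the original file): the stride sequence for
 * the Shell sort above grows as 1, 4, 13, 40, ... (3*h + 1) and is then
 * replayed downward.  A hedged worked example for ngroups = 6:
 *
 *	strides used (descending): 4, then 1
 *	input   : 7 3 9 1 8 2
 *	stride 4: 7 2 9 1 8 3	// insertion sort over elements 4 apart
 *	stride 1: 1 2 3 7 8 9	// final plain insertion sort
 */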
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
	unsigned int left, right;

	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);

	groups_sort(group_info);
	get_group_info(group_info);

	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (i > gidsetsize) {
	if (groups_to_user(grouplist, current->group_info)) {

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */
asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
	struct group_info *group_info;

	if (!capable(CAP_SETGID))
	if ((unsigned)gidsetsize > NGROUPS_MAX)

	group_info = groups_alloc(gidsetsize);
	retval = groups_from_user(group_info, grouplist);
		put_group_info(group_info);

	retval = set_current_groups(group_info);
	put_group_info(group_info);

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))

asmlinkage long sys_sethostname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)
	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (copy_to_user(name, utsname()->nodename, i))

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)

	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
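/*
 * Illustration (not part of the original file): as noted above there is
 * no getdomainname here; a hedged user-space sketch of reading the
 * domain name back through uname(2) instead (the domainname field is a
 * GNU extension of struct utsname).
 *
 *	#define _GNU_SOURCE
 *	#include <sys/utsname.h>
 *	#include <stdio.h>
 *
 *	struct utsname u;
 *	if (uname(&u) == 0)
 *		printf("%s\n", u.domainname);
 */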
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;

	if (resource >= RLIM_NLIMITS)
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)

	retval = security_task_setrlimit(resource, &new_rlim);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry. But we use the zero value to mean "it was
		 * never set". So let's cheat and make it one second
		 */
		new_rlim.rlim_cur = 1;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
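/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of the RLIMIT_CPU special case above; a soft limit of 0 is
 * quietly treated as one second, after which SIGXCPU is delivered.
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 0, .rlim_max = 10 };
 *	setrlimit(RLIMIT_CPU, &rl);	// behaves like a 1-second soft limit
 */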
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (!lock_task_sighand(p, &flags)) {

	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;

		if (who == RUSAGE_CHILDREN)

		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			r->ru_inblock += task_io_get_inblock(t);
			r->ru_oublock += task_io_get_oublock(t);

	unlock_task_sighand(p, &flags);

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
	return getrusage(current, who, ru);

asmlinkage long sys_umask(int mask)
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
	error = security_task_prctl(option, arg2, arg3, arg4, arg5);

	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
		current->pdeath_signal = arg2;
	case PR_GET_PDEATHSIG:
		error = put_user(current->pdeath_signal, (int __user *)arg2);
	case PR_GET_DUMPABLE:
		error = get_dumpable(current->mm);
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
		set_dumpable(current->mm, arg2);
	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(current, arg2);
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(current, arg2);
		error = SET_FPEMU_CTL(current, arg2);
		error = GET_FPEMU_CTL(current, arg2);
		error = SET_FPEXC_CTL(current, arg2);
		error = GET_FPEXC_CTL(current, arg2);
		error = PR_TIMING_STATISTICAL;
		if (arg2 == PR_TIMING_STATISTICAL)
	case PR_GET_KEEPCAPS:
		if (current->keep_capabilities)
	case PR_SET_KEEPCAPS:
		if (arg2 != 0 && arg2 != 1) {
		current->keep_capabilities = arg2;

		struct task_struct *me = current;
		unsigned char ncomm[sizeof(me->comm)];

		ncomm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(ncomm, (char __user *)arg2,
				      sizeof(me->comm)-1) < 0)
		set_task_comm(me, ncomm);

		struct task_struct *me = current;
		unsigned char tcomm[sizeof(me->comm)];

		get_task_comm(tcomm, me);
		if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))

		error = GET_ENDIAN(current, arg2);
		error = SET_ENDIAN(current, arg2);

	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
	int cpu = raw_smp_processor_id();

	err |= put_user(cpu, cpup);
	err |= put_user(cpu_to_node(cpu), nodep);

		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);

	return err ? -EFAULT : 0;
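/*
 * Illustration (not part of the original file): a hedged user-space
 * sketch of calling getcpu.  Passing a NULL cache pointer is valid for
 * this implementation, as the comment above explains, and glibc also
 * offers sched_getcpu() for the common case.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned cpu, node;
 *	syscall(SYS_getcpu, &cpu, &node, NULL);
 */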
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
	struct subprocess_info *info;

		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);

	info = call_usermodehelper_setup(argv[0], argv, envp);

	call_usermodehelper_setcleanup(info, argv_cleanup);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

		printk(KERN_WARNING "Failed to start orderly shutdown: "
				    "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap.  Or not even bother syncing
		   if we're doing an emergency shutdown? */

EXPORT_SYMBOL_GPL(orderly_poweroff);
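/*
 * Illustration (not part of the original file): a hedged sketch of how
 * an in-kernel caller (say a thermal driver) might use the helper
 * above; the trip-point check is hypothetical.
 *
 *	if (temperature_past_critical_trip())
 *		orderly_poweroff(true);	// force poweroff if /sbin/poweroff fails
 */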