/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
                nl = &((*nl)->next);
        }
        n->next = *nl;
        rcu_assign_pointer(*nl, n);
        return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if ((*nl) == n) {
                        rcu_assign_pointer(*nl, n->next);
                        return 0;
                }
                nl = &((*nl)->next);
        }
        return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
                unsigned long val, void *v)
{
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;

        nb = rcu_dereference(*nl);
        while (nb) {
                next_nb = rcu_dereference(nb->next);
                ret = nb->notifier_call(nb, val, v);
                if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
                        break;
                nb = next_nb;
        }
        return ret;
}
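
/*
 * Illustrative sketch (not part of this file): the shape of a callback
 * as notifier_call_chain() above invokes it.  The name example_notify
 * and the SYS_RESTART check are hypothetical; NOTIFY_OK (keep walking
 * the chain) and NOTIFY_STOP (stop, and return my value) are the real
 * codes tested against NOTIFY_STOP_MASK.
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action != SYS_RESTART)
 *			return NOTIFY_OK;
 *		return NOTIFY_STOP;
 *	}
 */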

/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_register(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_unregister(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        synchronize_rcu();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
                unsigned long val, void *v)
{
        int ret;

        rcu_read_lock();
        ret = notifier_call_chain(&nh->head, val, v);
        rcu_read_unlock();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);

/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_register(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_unregister(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                unsigned long val, void *v)
{
        int ret;

        down_read(&nh->rwsem);
        ret = notifier_call_chain(&nh->head, val, v);
        up_read(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
                unsigned long val, void *v)
{
        return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
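
/*
 * Illustrative sketch (not part of this file): how a driver might hook
 * the reboot chain exported just above.  The handler name and the
 * quiesce call are hypothetical; the notifier_block setup and
 * register_reboot_notifier() are the real interface.
 *
 *	static int example_reboot_event(struct notifier_block *nb,
 *					unsigned long code, void *cmd)
 *	{
 *		example_quiesce_hardware();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot_event,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 */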

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
        int no_nice;

        if (p->uid != current->euid &&
            p->euid != current->euid && !capable(CAP_SYS_NICE)) {
                error = -EPERM;
                goto out;
        }
        if (niceval < task_nice(p) && !can_nice(p, niceval)) {
                error = -EACCES;
                goto out;
        }
        no_nice = security_task_setnice(p, niceval);
        if (no_nice) {
                error = no_nice;
                goto out;
        }
        if (error == -ESRCH)
                error = 0;
        set_user_nice(p, niceval);
out:
        return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        int error = -EINVAL;

        if (which > 2 || which < 0)
                goto out;

        /* normalize: avoid signed division (rounding problems) */
        error = -ESRCH;
        if (niceval < -20)
                niceval = -20;
        if (niceval > 19)
                niceval = 19;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p)
                        error = set_one_prio(p, niceval, error);
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        error = set_one_prio(p, niceval, error);
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else
                        if ((who != current->uid) && !(user = find_user(who)))
                                goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who)
                                error = set_one_prio(p, niceval, error);
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* For find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);
out:
        return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        long niceval, retval = -ESRCH;

        if (which > 2 || which < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                }
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else
                        if ((who != current->uid) && !(user = find_user(who)))
                                goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who) {
                                niceval = 20 - task_nice(p);
                                if (niceval > retval)
                                        retval = niceval;
                        }
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* for find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);

        return retval;
}
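
/*
 * Illustrative sketch (not part of this file): what the 20 - nice
 * encoding above looks like to a caller going through syscall(2)
 * directly (glibc's getpriority() wrapper undoes the offset).  A task
 * at nice -20 reads back as 40, one at nice 19 reads back as 1.
 *
 *	errno = 0;
 *	long enc = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (enc == -1 && errno)
 *		perror("getpriority");
 *	else
 *		printf("nice value: %ld\n", 20 - enc);
 */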

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
        machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
        blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
        system_state = SYSTEM_RESTART;
        device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *       or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
        kernel_restart_prepare(cmd);
        if (!cmd) {
                printk(KERN_EMERG "Restarting system.\n");
        } else {
                printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
        }
        printk(".\n");
        machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
        struct kimage *image;
        image = xchg(&kexec_image, NULL);
        if (!image) {
                return;
        }
        kernel_restart_prepare(NULL);
        printk(KERN_EMERG "Starting new kernel\n");
        machine_shutdown();
        machine_kexec(image);
#endif
}

void kernel_shutdown_prepare(enum system_states state)
{
        blocking_notifier_call_chain(&reboot_notifier_list,
                (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
        system_state = state;
        device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
        kernel_shutdown_prepare(SYSTEM_HALT);
        printk(KERN_EMERG "System halted.\n");
        machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        printk(KERN_EMERG "Power down.\n");
        machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
        char buffer[256];

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /* For safety, we require "magic" arguments. */
        if (magic1 != LINUX_REBOOT_MAGIC1 ||
            (magic2 != LINUX_REBOOT_MAGIC2 &&
             magic2 != LINUX_REBOOT_MAGIC2A &&
             magic2 != LINUX_REBOOT_MAGIC2B &&
             magic2 != LINUX_REBOOT_MAGIC2C))
                return -EINVAL;

        /* Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set do it the easy way.
         */
        if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
                cmd = LINUX_REBOOT_CMD_HALT;

        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
                kernel_restart(NULL);
                break;

        case LINUX_REBOOT_CMD_CAD_ON:
                C_A_D = 1;
                break;

        case LINUX_REBOOT_CMD_CAD_OFF:
                C_A_D = 0;
                break;

        case LINUX_REBOOT_CMD_HALT:
                kernel_halt();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
                kernel_power_off();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_RESTART2:
                if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
                        unlock_kernel();
                        return -EFAULT;
                }
                buffer[sizeof(buffer) - 1] = '\0';

                kernel_restart(buffer);
                break;

        case LINUX_REBOOT_CMD_KEXEC:
                kernel_kexec();
                unlock_kernel();
                return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
        case LINUX_REBOOT_CMD_SW_SUSPEND:
        {
                int ret = software_suspend();
                unlock_kernel();
                return ret;
        }
#endif

        default:
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();
        return 0;
}
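
/*
 * Illustrative sketch (not part of this file): the magic numbers as a
 * caller must supply them; anything else fails with -EINVAL, so a stray
 * syscall with garbage arguments cannot reboot the machine.  Requires
 * CAP_SYS_BOOT, and the caller should sync() first, as the comment
 * above sys_reboot() notes.
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */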

static void deferred_cad(void *dummy)
{
        kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
        static DECLARE_WORK(cad_work, deferred_cad, NULL);

        if (C_A_D)
                schedule_work(&cad_work);
        else
                kill_proc(cad_pid, SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
        int old_rgid = current->gid;
        int old_egid = current->egid;
        int new_rgid = old_rgid;
        int new_egid = old_egid;
        int retval;

        retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        if (rgid != (gid_t) -1) {
                if ((old_rgid == rgid) ||
                    (current->egid == rgid) ||
                    capable(CAP_SETGID))
                        new_rgid = rgid;
                else
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if ((old_rgid == egid) ||
                    (current->egid == egid) ||
                    (current->sgid == egid) ||
                    capable(CAP_SETGID))
                        new_egid = egid;
                else
                        return -EPERM;
        }
        if (new_egid != old_egid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && egid != old_rgid))
                current->sgid = new_egid;
        current->fsgid = new_egid;
        current->egid = new_egid;
        current->gid = new_rgid;
        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
        int old_egid = current->egid;
        int retval;

        retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        if (capable(CAP_SETGID)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->gid = current->egid = current->sgid = current->fsgid = gid;
        } else if ((gid == current->gid) || (gid == current->sgid)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = current->fsgid = gid;
        } else
                return -EPERM;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
        struct user_struct *new_user;

        new_user = alloc_uid(new_ruid);
        if (!new_user)
                return -EAGAIN;

        if (atomic_read(&new_user->processes) >=
                        current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
            new_user != &root_user) {
                free_uid(new_user);
                return -EAGAIN;
        }

        switch_uid(new_user);

        if (dumpclear) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->uid = new_ruid;
        return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
        int old_ruid, old_euid, old_suid, new_ruid, new_euid;
        int retval;

        retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        new_ruid = old_ruid = current->uid;
        new_euid = old_euid = current->euid;
        old_suid = current->suid;

        if (ruid != (uid_t) -1) {
                new_ruid = ruid;
                if ((old_ruid != ruid) &&
                    (current->euid != ruid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (euid != (uid_t) -1) {
                new_euid = euid;
                if ((old_ruid != euid) &&
                    (current->euid != euid) &&
                    (current->suid != euid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
                return -EAGAIN;

        if (new_euid != old_euid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = new_euid;
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old_ruid))
                current->suid = current->euid;
        current->fsuid = current->euid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
        int old_euid = current->euid;
        int old_ruid, old_suid, new_ruid, new_suid;
        int retval;

        retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        old_ruid = new_ruid = current->uid;
        old_suid = current->suid;
        new_suid = old_suid;

        if (capable(CAP_SETUID)) {
                if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
                        return -EAGAIN;
                new_suid = uid;
        } else if ((uid != current->uid) && (uid != new_suid))
                return -EPERM;

        if (old_euid != uid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = uid;
        current->suid = new_suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
        int old_ruid = current->uid;
        int old_euid = current->euid;
        int old_suid = current->suid;
        int retval;

        retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETUID)) {
                if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
                    (ruid != current->euid) && (ruid != current->suid))
                        return -EPERM;
                if ((euid != (uid_t) -1) && (euid != current->uid) &&
                    (euid != current->euid) && (euid != current->suid))
                        return -EPERM;
                if ((suid != (uid_t) -1) && (suid != current->uid) &&
                    (suid != current->euid) && (suid != current->suid))
                        return -EPERM;
        }
        if (ruid != (uid_t) -1) {
                if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
                        return -EAGAIN;
        }
        if (euid != (uid_t) -1) {
                if (euid != current->euid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->euid = euid;
        }
        current->fsuid = current->euid;
        if (suid != (uid_t) -1)
                current->suid = suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
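
/*
 * Illustrative sketch (not part of this file): the privilege-drop
 * pattern sys_setresuid() enables for a setuid binary.  The helper
 * name do_unprivileged_work() is hypothetical.
 *
 *	uid_t real = getuid(), priv = geteuid();
 *
 *	setresuid(real, real, priv);	(drop; keep priv as the saved uid)
 *	do_unprivileged_work();
 *	setresuid(-1, priv, -1);	(regain from the saved uid)
 *	setresuid(real, real, real);	(or drop for good)
 */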

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
        int retval;

        if (!(retval = put_user(current->uid, ruid)) &&
            !(retval = put_user(current->euid, euid)))
                retval = put_user(current->suid, suid);

        return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
        int retval;

        retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETGID)) {
                if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
                    (rgid != current->egid) && (rgid != current->sgid))
                        return -EPERM;
                if ((egid != (gid_t) -1) && (egid != current->gid) &&
                    (egid != current->egid) && (egid != current->sgid))
                        return -EPERM;
                if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
                    (sgid != current->egid) && (sgid != current->sgid))
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if (egid != current->egid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = egid;
        }
        current->fsgid = current->egid;
        if (rgid != (gid_t) -1)
                current->gid = rgid;
        if (sgid != (gid_t) -1)
                current->sgid = sgid;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
        int retval;

        if (!(retval = put_user(current->gid, rgid)) &&
            !(retval = put_user(current->egid, egid)))
                retval = put_user(current->sgid, sgid);

        return retval;
}

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks.  This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to).  It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
        int old_fsuid;

        old_fsuid = current->fsuid;
        if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
                return old_fsuid;

        if (uid == current->uid || uid == current->euid ||
            uid == current->suid || uid == current->fsuid ||
            capable(CAP_SETUID)) {
                if (uid != old_fsuid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->fsuid = uid;
        }

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

        return old_fsuid;
}

/*
 * Same as setfsuid(), but for the fsgid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
        int old_fsgid;

        old_fsgid = current->fsgid;
        if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
                return old_fsgid;

        if (gid == current->gid || gid == current->egid ||
            gid == current->sgid || gid == current->fsgid ||
            capable(CAP_SETGID)) {
                if (gid != old_fsgid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->fsgid = gid;
                key_fsgid_changed(current);
                proc_id_connector(current, PROC_EVENT_GID);
        }
        return old_fsgid;
}

asmlinkage long sys_times(struct tms __user *tbuf)
{
        /*
         * In the SMP world we might just be unlucky and have one of
         * the times increment as we use it.  Since the value is an
         * atomically safe type this is just fine.  Conceptually it's
         * as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                spin_lock_irq(&tsk->sighand->siglock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);

                tmp.tms_utime = cputime_to_clock_t(utime);
                tmp.tms_stime = cputime_to_clock_t(stime);
                tmp.tms_cutime = cputime_to_clock_t(cutime);
                tmp.tms_cstime = cputime_to_clock_t(cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
        return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
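
/*
 * Illustrative sketch (not part of this file): the return value above
 * is a tick count, so userspace times an interval by differencing two
 * calls and dividing by the clock tick rate (do_work() is a stand-in).
 *
 *	struct tms t0, t1;
 *	clock_t start = times(&t0);
 *	do_work();
 *	clock_t end = times(&t1);
 *	double secs = (double)(end - start) / sysconf(_SC_CLK_TCK);
 */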

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it.  I also don't fully
 * understand sessions/pgrp etc.  Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right....  this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Ouch.  Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
        struct task_struct *p;
        struct task_struct *group_leader = current->group_leader;
        int err = -EINVAL;

        if (!pid)
                pid = group_leader->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us.  -DaveM
         */
        write_lock_irq(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_pid(pid);
        if (!p)
                goto out;

        err = -EINVAL;
        if (!thread_group_leader(p))
                goto out;

        if (p->real_parent == group_leader) {
                err = -EPERM;
                if (p->signal->session != group_leader->signal->session)
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                err = -ESRCH;
                if (p != group_leader)
                        goto out;
        }

        err = -EPERM;
        if (p->signal->leader)
                goto out;

        if (pgid != pid) {
                struct task_struct *p;

                do_each_task_pid(pgid, PIDTYPE_PGID, p) {
                        if (p->signal->session == group_leader->signal->session)
                                goto ok_pgid;
                } while_each_task_pid(pgid, PIDTYPE_PGID, p);
                goto out;
        }

ok_pgid:
        err = security_task_setpgid(p, pgid);
        if (err)
                goto out;

        if (process_group(p) != pgid) {
                detach_pid(p, PIDTYPE_PGID);
                p->signal->pgrp = pgid;
                attach_pid(p, PIDTYPE_PGID, pgid);
        }

        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
        return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
        if (!pid) {
                return process_group(current);
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getpgid(p);
                        if (!retval)
                                retval = process_group(p);
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
        /* SMP - assuming writes are word atomic this is fine */
        return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
        if (!pid) {
                return current->signal->session;
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getsid(p);
                        if (!retval)
                                retval = p->signal->session;
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

asmlinkage long sys_setsid(void)
{
        struct task_struct *group_leader = current->group_leader;
        pid_t session;
        int err = -EPERM;

        mutex_lock(&tty_mutex);
        write_lock_irq(&tasklist_lock);

        /* Fail if I am already a session leader */
        if (group_leader->signal->leader)
                goto out;

        session = group_leader->pid;
        /* Fail if a process group id already exists that equals the
         * proposed session id.
         *
         * Don't check if session id == 1 because kernel threads use this
         * session id and so the check will always fail and make it so
         * init cannot successfully call setsid.
         */
        if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
                goto out;

        group_leader->signal->leader = 1;
        __set_special_pids(session, session);
        group_leader->signal->tty = NULL;
        group_leader->signal->tty_old_pgrp = 0;
        err = process_group(group_leader);
out:
        write_unlock_irq(&tasklist_lock);
        mutex_unlock(&tty_mutex);
        return err;
}
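
/*
 * Illustrative sketch (not part of this file): why setsid() refuses a
 * process-group leader, and the fork-first idiom a daemon uses so the
 * child is guaranteed not to be one.
 *
 *	pid_t pid = fork();
 *	if (pid > 0)
 *		_exit(0);		(parent leaves)
 *	if (pid == 0 && setsid() < 0)
 *		perror("setsid");	(the fresh child is never a pgrp leader)
 */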

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
        struct group_info *group_info;
        int nblocks;
        int i;

        nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
        /* Make sure we always allocate at least one indirect block pointer */
        nblocks = nblocks ? : 1;
        group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
        if (!group_info)
                return NULL;
        group_info->ngroups = gidsetsize;
        group_info->nblocks = nblocks;
        atomic_set(&group_info->usage, 1);

        if (gidsetsize <= NGROUPS_SMALL) {
                group_info->blocks[0] = group_info->small_block;
        } else {
                for (i = 0; i < nblocks; i++) {
                        gid_t *b;
                        b = (void *)__get_free_page(GFP_USER);
                        if (!b)
                                goto out_undo_partial_alloc;
                        group_info->blocks[i] = b;
                }
        }
        return group_info;

out_undo_partial_alloc:
        while (--i >= 0) {
                free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
        return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
        if (group_info->blocks[0] != group_info->small_block) {
                int i;
                for (i = 0; i < group_info->nblocks; i++)
                        free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
                struct group_info *group_info)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_to_user(grouplist+off, group_info->blocks[i], len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
                gid_t __user *grouplist)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_from_user(group_info->blocks[i], grouplist+off, len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = GROUP_AT(group_info, right);

                        while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
                                GROUP_AT(group_info, right) =
                                    GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}
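
/*
 * Worked example for the gap sequence above (a reading of the loop, not
 * extra behavior): for ngroups = 32 the first loop steps the stride
 * 1 -> 4 -> 13 -> 40 and stops, and the stride /= 3 before the while
 * gives passes with gaps 13, 4 and 1, i.e. the 3h+1 sequence, ending
 * with a plain insertion sort at gap 1 that leaves the set fully sorted.
 */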

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
        unsigned int left, right;

        if (!group_info)
                return 0;

        left = 0;
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
                int cmp = grp - GROUP_AT(group_info, mid);
                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
                        right = mid;
                else
                        return 1;
        }
        return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
        int retval;
        struct group_info *old_info;

        retval = security_task_setgroups(group_info);
        if (retval)
                return retval;

        groups_sort(group_info);
        get_group_info(group_info);

        task_lock(current);
        old_info = current->group_info;
        current->group_info = group_info;
        task_unlock(current);

        put_group_info(old_info);

        return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
        int i = 0;

        /*
         * SMP: Nobody else can change our grouplist.  Thus we are
         *      safe.
         */

        if (gidsetsize < 0)
                return -EINVAL;

        /* no need to grab task_lock here; it cannot change */
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
out:
        return i;
}

/*
 * SMP: Our groups are copy-on-write.  We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
        int retval = 1;
        if (grp != current->fsgid) {
                retval = groups_search(current->group_info, grp);
        }
        return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
        int retval = 1;
        if (grp != current->egid) {
                retval = groups_search(current->group_info, grp);
        }
        return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
{
        int errno = 0;

        down_read(&uts_sem);
        if (copy_to_user(name, &system_utsname, sizeof *name))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.nodename, tmp, len);
                system_utsname.nodename[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
        int i, errno;

        if (len < 0)
                return -EINVAL;
        down_read(&uts_sem);
        i = 1 + strlen(system_utsname.nodename);
        if (i > len)
                i = len;
        errno = 0;
        if (copy_to_user(name, system_utsname.nodename, i))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;

        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.domainname, tmp, len);
                system_utsname.domainname[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        else {
                struct rlimit value;
                task_lock(current->group_leader);
                value = current->signal->rlim[resource];
                task_unlock(current->group_leader);
                return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
        }
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit.  Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit x;
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        task_lock(current->group_leader);
        x = current->signal->rlim[resource];
        task_unlock(current->group_leader);
        if (x.rlim_cur > 0x7FFFFFFF)
                x.rlim_cur = 0x7FFFFFFF;
        if (x.rlim_max > 0x7FFFFFFF)
                x.rlim_max = 0x7FFFFFFF;
        return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit new_rlim, *old_rlim;
        unsigned long it_prof_secs;
        int retval;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
        if (new_rlim.rlim_cur > new_rlim.rlim_max)
                return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
                return -EPERM;

        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
                return retval;

        task_lock(current->group_leader);
        *old_rlim = new_rlim;
        task_unlock(current->group_leader);

        if (resource != RLIMIT_CPU)
                goto out;

        /*
         * RLIMIT_CPU handling.  Note that the kernel fails to return an error
         * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
         * very long-standing error, and fixing it now risks breakage of
         * applications, so we live with it.
         */
        if (new_rlim.rlim_cur == RLIM_INFINITY)
                goto out;

        it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
        if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
                unsigned long rlim_cur = new_rlim.rlim_cur;
                cputime_t cputime;

                if (rlim_cur == 0) {
                        /*
                         * The caller is asking for an immediate RLIMIT_CPU
                         * expiry.  But we use the zero value to mean "it was
                         * never set".  So let's cheat and make it one second
                         * instead.
                         */
                        rlim_cur = 1;
                }
                cputime = secs_to_cputime(rlim_cur);
                read_lock(&tasklist_lock);
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
                read_unlock(&tasklist_lock);
        }
out:
        return 0;
}
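
/*
 * Illustrative sketch (not part of this file): the RLIMIT_CPU path
 * above seen from userspace.  After rlim_cur seconds of CPU time the
 * process gets SIGXCPU; at rlim_max it is killed.
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *
 *	if (setrlimit(RLIMIT_CPU, &rl) < 0)
 *		perror("setrlimit");
 */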

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single-threaded, and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single-threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;

        memset((char *) r, 0, sizeof *r);
        utime = stime = cputime_zero;

        rcu_read_lock();
        if (!lock_task_sighand(p, &flags)) {
                rcu_read_unlock();
                return;
        }

        switch (who) {
        case RUSAGE_BOTH:
        case RUSAGE_CHILDREN:
                utime = p->signal->cutime;
                stime = p->signal->cstime;
                r->ru_nvcsw = p->signal->cnvcsw;
                r->ru_nivcsw = p->signal->cnivcsw;
                r->ru_minflt = p->signal->cmin_flt;
                r->ru_majflt = p->signal->cmaj_flt;

                if (who == RUSAGE_CHILDREN)
                        break;
                /* RUSAGE_BOTH falls through to add in the self counters */

        case RUSAGE_SELF:
                utime = cputime_add(utime, p->signal->utime);
                stime = cputime_add(stime, p->signal->stime);
                r->ru_nvcsw += p->signal->nvcsw;
                r->ru_nivcsw += p->signal->nivcsw;
                r->ru_minflt += p->signal->min_flt;
                r->ru_majflt += p->signal->maj_flt;
                t = p;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        r->ru_nvcsw += t->nvcsw;
                        r->ru_nivcsw += t->nivcsw;
                        r->ru_minflt += t->min_flt;
                        r->ru_majflt += t->maj_flt;
                        t = next_thread(t);
                } while (t != p);
                break;

        default:
                BUG();
        }

        unlock_task_sighand(p, &flags);
        rcu_read_unlock();

        cputime_to_timeval(utime, &r->ru_utime);
        cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
        struct rusage r;
        k_getrusage(p, who, &r);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
        return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
        mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
        return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
{
        long error;

        error = security_task_prctl(option, arg2, arg3, arg4, arg5);
        if (error)
                return error;

        switch (option) {
        case PR_SET_PDEATHSIG:
                if (!valid_signal(arg2)) {
                        error = -EINVAL;
                        break;
                }
                current->pdeath_signal = arg2;
                break;
        case PR_GET_PDEATHSIG:
                error = put_user(current->pdeath_signal, (int __user *)arg2);
                break;
        case PR_GET_DUMPABLE:
                error = current->mm->dumpable;
                break;
        case PR_SET_DUMPABLE:
                if (arg2 < 0 || arg2 > 2) {
                        error = -EINVAL;
                        break;
                }
                current->mm->dumpable = arg2;
                break;

        case PR_SET_UNALIGN:
                error = SET_UNALIGN_CTL(current, arg2);
                break;
        case PR_GET_UNALIGN:
                error = GET_UNALIGN_CTL(current, arg2);
                break;
        case PR_SET_FPEMU:
                error = SET_FPEMU_CTL(current, arg2);
                break;
        case PR_GET_FPEMU:
                error = GET_FPEMU_CTL(current, arg2);
                break;
        case PR_SET_FPEXC:
                error = SET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_FPEXC:
                error = GET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_TIMING:
                error = PR_TIMING_STATISTICAL;
                break;
        case PR_SET_TIMING:
                if (arg2 == PR_TIMING_STATISTICAL)
                        error = 0;
                else
                        error = -EINVAL;
                break;

        case PR_GET_KEEPCAPS:
                if (current->keep_capabilities)
                        error = 1;
                break;
        case PR_SET_KEEPCAPS:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->keep_capabilities = arg2;
                break;
        case PR_SET_NAME: {
                struct task_struct *me = current;
                unsigned char ncomm[sizeof(me->comm)];

                ncomm[sizeof(me->comm)-1] = 0;
                if (strncpy_from_user(ncomm, (char __user *)arg2,
                                        sizeof(me->comm)-1) < 0)
                        return -EFAULT;
                set_task_comm(me, ncomm);
                return 0;
        }
        case PR_GET_NAME: {
                struct task_struct *me = current;
                unsigned char tcomm[sizeof(me->comm)];

                get_task_comm(tcomm, me);
                if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
                        return -EFAULT;
                return 0;
        }
        case PR_GET_ENDIAN:
                error = GET_ENDIAN(current, arg2);
                break;
        case PR_SET_ENDIAN:
                error = SET_ENDIAN(current, arg2);
                break;

        default:
                error = -EINVAL;
                break;
        }
        return error;
}
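
/*
 * Illustrative sketch (not part of this file): the PR_SET_NAME /
 * PR_GET_NAME pair above from userspace.  comm is 16 bytes including
 * the terminating NUL, so longer names are silently truncated.
 *
 *	char name[16];
 *
 *	prctl(PR_SET_NAME, "example-worker", 0, 0, 0);
 *	prctl(PR_GET_NAME, name, 0, 0, 0);
 *	printf("comm: %s\n", name);
 */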