/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments simply should
 *   redo. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the incarnation of the O(1) scheduler, it becomes unnecessary to
 * perform the check/retry algorithm for waking up blocked processes as the
 * new scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
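
/*
 * Illustrative userspace sketch (not part of this file, kept disabled):
 * it exercises the two documented behaviors above - semop() returns 0
 * on success rather than a semaphore value, and SEM_UNDO adjustments
 * are applied when the task exits (see exit_sem() below). The key and
 * mode are arbitrary example values.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf op = {
		.sem_num = 0,
		.sem_op  = 1,		/* increment semval by one */
		.sem_flg = SEM_UNDO,	/* rolled back in exit_sem() */
	};

	if (semid < 0 || semop(semid, &op, 1) != 0) {
		perror("semop");
		return 1;
	}
	return 0;	/* semop() returned 0, not the semaphore value */
}
#endif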

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>

#include <asm/uaccess.h>
#include "util.h"

#define sem_ids(ns)	(*((ns)->ids[IPC_SEM_IDS]))

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(ns, sma, semid)	\
	ipc_checkid(&sem_ids(ns), &sma->sem_perm, semid)
#define sem_buildid(ns, id, seq) \
	ipc_buildid(&sem_ids(ns), id, seq)

static struct ipc_ids init_sem_ids;

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct sem_array *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SEM_IDS] = ids;
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(ids);
}

int sem_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__sem_init_ns(ns, ids);
	return 0;
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	struct sem_array *sma;
	int next_id;
	int total, in_use;

	down_write(&sem_ids(ns).rw_mutex);

	in_use = sem_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		sma = idr_find(&sem_ids(ns).ipcs_idr, next_id);
		if (sma == NULL)
			continue;
		ipc_lock_by_ptr(&sma->sem_perm);
		freeary(ns, sma);
		total++;
	}
	up_write(&sem_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_SEM_IDS]);
	ns->ids[IPC_SEM_IDS] = NULL;
}

void __init sem_init (void)
{
	__sem_init_ns(&init_ipc_ns, &init_sem_ids);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id);

	return container_of(ipcp, struct sem_array, sem_perm);
}

/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* calling wake_up_process
 *	* setting queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1

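/*
 * A condensed, illustrative sketch of the two-stage handshake described
 * above, with the waker and the sleeper shown side by side. This is a
 * paraphrase of the real paths in update_queue() and sys_semtimedop()
 * below, kept disabled; it is not additional kernel code.
 */
#if 0
	/* waker, holding sma->lock: */
	remove_from_queue(sma, q);
	q->status = IN_WAKEUP;		/* stage 1: result is imminent */
	wake_up_process(q->sleeper);	/* q is still valid here */
	smp_wmb();
	q->status = error;		/* stage 2: q may vanish now */

	/* sleeper, after schedule() returns, holding no lock: */
	error = queue.status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();		/* spin until the final status lands */
		error = queue.status;
	}
#endif
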
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if(id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	ns->used_sems += nsems;

	sma->sem_perm.id = sem_buildid(ns, id, sma->sem_perm.seq);
	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

asmlinkage long sys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
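
/*
 * Illustrative userspace sketch (not part of this file, kept disabled):
 * exercising sys_semget() from the caller's side. The key, set size and
 * mode are arbitrary example values. With IPC_CREAT and no existing set
 * the call reaches newary(); for an existing set it goes through
 * sem_security() and sem_more_checks() instead.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>

int main(void)
{
	key_t key = ftok("/tmp", 'S');	/* arbitrary example key */
	int semid = semget(key, 2, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		return 1;
	}
	printf("semid = %d\n", semid);
	return 0;
}
#endif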

/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 * Note that q->prev is a pointer to the pointer that points at q
 * (either the list head or a predecessor's next field), which lets
 * remove_from_queue() unlink head and middle elements uniformly.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	/* make the old tail pointer point at q, remembering it in q->prev */
	*(q->prev = sma->sem_pending_last) = q;
	/* the new tail pointer is &q->next; terminate the list there */
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task needs to sleep, else
 * return an error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
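
/*
 * Worked example of try_atomic_semop() (illustrative): with semval == 0,
 * a single sop of { .sem_num = 0, .sem_op = -1, .sem_flg = 0 } computes
 * result = 0 + (-1) < 0, jumps to would_block, and returns 1, telling
 * the caller to sleep; with IPC_NOWAIT set it returns -EAGAIN instead.
 * A sem_op of 0 blocks only while semval is nonzero. On the block and
 * error paths, the undo loop rolls back every semval already modified.
 */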

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while(q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;
			remove_from_queue(sma,q);
			q->status = IN_WAKEUP;
			/*
			 * Continue scanning. The next operation
			 * that must be checked depends on the type of the
			 * completed operation:
			 * - if the operation modified the array, then
			 *   restart from the head of the queue and
			 *   check for threads that might be waiting
			 *   for semaphore values to become 0.
			 * - if the operation didn't modify the array,
			 *   then just continue.
			 */
			if (q->alter)
				n = sma->sem_pending;
			else
				n = q->next;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			smp_wmb();
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}

/* The following counts are associated with each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
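
/*
 * Userspace reads these counts via semctl(). An illustrative sketch
 * (not part of this file, kept disabled); GETNCNT and GETZCNT take no
 * semun argument:
 */
#if 0
#include <sys/sem.h>
#include <stdio.h>

void report_waiters(int semid)
{
	printf("tasks waiting for an increase: %d\n",
	       semctl(semid, 0, GETNCNT));
	printf("tasks waiting for zero:        %d\n",
	       semctl(semid, 0, GETZCNT));
}
#endif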

/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct sem_array *sma)
{
	struct sem_undo *un;
	struct sem_queue *q;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while(q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		q->prev = NULL;
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		smp_wmb();
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		sma = sem_lock(ns, semid);
		if (IS_ERR(sma))
			return PTR_ERR(sma);

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sma->sem_perm.id;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			/* drop the lock (but keep a reference) while
			 * allocating; re-validate the set afterwards */
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

struct sem_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf uninitialized_var(setbuf);
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock_check_down(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	ipcp = &sma->sem_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock;
	}
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err=-EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, sma);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(ns,semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down_write(&sem_ids(ns).rw_mutex);
		err = semctl_down(ns,semid,semnum,cmd,version,arg);
		up_write(&sem_ids(ns).rw_mutex);
		return err;
	default:
		return -EINVAL;
	}
}
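
/*
 * Illustrative userspace sketch (not part of this file, kept disabled):
 * on Linux the caller of semctl() must define union semun itself (see
 * the semctl(2) man page). The helper name and the initial value are
 * arbitrary example choices; SETVAL is routed through semctl_main()
 * above.
 */
#if 0
#include <sys/sem.h>
#include <stdio.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int set_initial_value(int semid)
{
	union semun arg = { .val = 1 };

	if (semctl(semid, 0, SETVAL, arg) < 0) {
		perror("semctl");
		return -1;
	}
	return 0;
}
#endif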

static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock.
 * This case is dealt with by initializing the spin lock only once, in
 * get_undo_list(), when the undo_list is first allocated.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_unlock(&undo_list->lock);
}


/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1) {
			/* prune entries invalidated by freeary() */
			*last=un->proc_next;
			kfree(un);
		} else {
			last=&un->proc_next;
		}
		un=*last;
	}
	return un;
}

static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_PTR(PTR_ERR(sma));

	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		/* lost a race with a concurrent allocation; use theirs */
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

retry_undos:
	if (undos) {
		un = find_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure; it was then invalidated by an
	 * RMID, and a new array has since received the same id.
	 * Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			update_queue (sma);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.id = semid;
	queue.alter = alter;
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(ns, semid);
	if (IS_ERR(sma)) {
		BUG_ON(queue.prev != NULL);
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma,&queue);
	goto out_unlock_free;

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
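
/*
 * Illustrative userspace sketch (not part of this file, kept disabled):
 * a decrement that gives up after two seconds. If schedule_timeout()
 * above expires, the syscall fails and userspace sees errno == EAGAIN.
 */
#if 0
#define _GNU_SOURCE
#include <sys/sem.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>

int take_with_timeout(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	if (semtimedop(semid, &op, 1, &ts) != 0) {
		if (errno == EAGAIN)
			fprintf(stderr, "timed out\n");
		return -1;
	}
	return 0;
}
#endif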

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the undo_list->lock:
 * it is initialized once, in get_undo_list(), when the undo_list is
 * first allocated.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * Add semadj values to semaphores, free undo structures.
 * Undo structures are not freed when semaphore arrays are destroyed,
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;
	struct ipc_namespace *ns;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	ns = tsk->nsproxy->ipc_ns;
	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;

		if(semid == -1)
			continue;
		sma = sem_lock(ns, semid);
		if (IS_ERR(sma))
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(ns,sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (u->semadj[i]) {
				semaphore->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif