// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be
 *   fulfilled. Semaphores are actively given to waiting tasks (necessary
 *   for FIFO). (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving
 *   FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
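
/*
 * Illustrative userspace sketch (not part of the kernel build); the key,
 * permission bits and SEM_UNDO usage are arbitrary example choices:
 *
 *      #include <sys/sem.h>
 *
 *      int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *      struct sembuf v = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *      struct sembuf p = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *      semop(id, &v, 1);       // V: increments semval, never blocks
 *      semop(id, &p, 1);       // P: decrements semval, blocks while semval == 0
 */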

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
        int semval;             /* current value */
        /*
         * PID of the process that last modified the semaphore. For
         * Linux, specifically these are:
         *  - semop
         *  - semctl, via SETVAL and SETALL.
         *  - at task exit when performing undo adjustments (see exit_sem).
         */
        struct pid *sempid;
        spinlock_t lock;        /* spinlock for fine-grained semtimedop */
        struct list_head pending_alter; /* pending single-sop operations */
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore */
        time64_t sem_otime;     /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
        struct kern_ipc_perm sem_perm;  /* permissions .. see ipc.h */
        time64_t sem_ctime;             /* create/last semctl() time */
        struct list_head pending_alter; /* pending operations */
                                        /* that alter the array */
        struct list_head pending_const; /* pending complex operations */
                                        /* that do not alter semvals */
        struct list_head list_id;       /* undo requests on this array */
        int sem_nsems;                  /* no. of semaphores in array */
        int complex_count;              /* pending complex operations */
        unsigned int use_global_lock;   /* >0: global lock required */

        struct sem sems[];
} __randomize_layout;

/* One queue for each sleeping process in the system. */
struct sem_queue {
        struct list_head list;          /* queue of pending operations */
        struct task_struct *sleeper;    /* this process */
        struct sem_undo *undo;          /* undo structure */
        struct pid *pid;                /* process id of requesting process */
        int status;                     /* completion status of operation */
        struct sembuf *sops;            /* array of pending operations */
        struct sembuf *blocking;        /* the operation that blocked */
        int nsops;                      /* number of operations */
        bool alter;                     /* does *sops alter the array? */
        bool dupsop;                    /* sops on more than one sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
        struct list_head list_proc;     /* per-process list: *
                                         * all undos from one process
                                         * rcu protected */
        struct rcu_head rcu;            /* rcu struct for sem_undo */
        struct sem_undo_list *ulp;      /* back ptr to sem_undo_list */
        struct list_head list_id;       /* per semaphore array list:
                                         * all undos for one array */
        int semid;                      /* semaphore set identifier */
        short *semadj;                  /* array of adjustments */
                                        /* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
        refcount_t refcnt;
        spinlock_t lock;
        struct list_head list_proc;
};

#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS      10
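
/*
 * Sketch of the effect (see complexmode_enter() and complexmode_tryleave()
 * below, illustrative): a complex op sets use_global_lock to 10; each later
 * global-lock release without pending complex ops decrements it, so roughly
 * the next ten lock/unlock cycles still serialize on the global lock before
 * the counter reaches zero and per-semaphore locking resumes.
 */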

/*
 * Locking:
 * a) global sem_lock() for read/write
 *      sem_undo.id_next,
 *      sem_array.complex_count,
 *      sem_array.pending{_alter,_const},
 *      sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *      sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *      sem_undo_list.list_proc:
 *      * undo_list->lock for write
 *      * rcu for read
 *      use_global_lock:
 *      * global sem_lock() for write
 *      * either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 *
 * Exceptions:
 * 1) use_global_lock: (SEM_BARRIER_1)
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release(): Immediately after setting it to 0,
 * a simple op can start.
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regard to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 *
 * 2) queue.status: (SEM_BARRIER_2)
 * Initialization is done while holding sem_lock(), so no further barrier is
 * required.
 * Setting it to a result code is a RELEASE, this is ensured by both a
 * smp_store_release() (for case a) and while holding sem_lock()
 * (for case b).
 * The ACQUIRE when reading the result code without holding sem_lock() is
 * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep()
 * (case a above).
 * Reading the result code while holding sem_lock() needs no further barriers,
 * the locks inside sem_lock() enforce ordering (case b above).
 *
 * 3) current->state:
 * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 * when holding sem_lock(), no further barriers are required.
 *
 * See also ipc/mqueue.c for more details on the covered races.
 */

#define sc_semmsl       sem_ctls[0]
#define sc_semmns       sem_ctls[1]
#define sc_semopm       sem_ctls[2]
#define sc_semmni       sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
        ns->sc_semmsl = SEMMSL;
        ns->sc_semmns = SEMMNS;
        ns->sc_semopm = SEMOPM;
        ns->sc_semmni = SEMMNI;
        ns->used_sems = 0;
        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &sem_ids(ns), freeary);
        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

void __init sem_init(void)
{
        sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                        " key semid perms nsems uid gid cuid cgid otime ctime\n",
                        IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
        struct sem_queue *q, *tq;

        /* complex operations still around? */
        if (sma->complex_count)
                return;
        /*
         * We will switch back to simple mode.
         * Move all pending operations back into the per-semaphore
         * queues.
         */
        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                struct sem *curr;

                curr = &sma->sems[q->sops[0].sem_num];

                list_add_tail(&q->list, &curr->pending_alter);
        }
        INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
        int i;

        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];

                list_splice_init(&sem->pending_alter, &sma->pending_alter);
        }
}

static void sem_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
        struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

        security_sem_free(&sma->sem_perm);
        kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
        int i;
        struct sem *sem;

        if (sma->use_global_lock > 0) {
                /*
                 * We are already in global lock mode.
                 * Nothing to do, just reset the
                 * counter until we return to simple mode.
                 */
                sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
                return;
        }
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

        for (i = 0; i < sma->sem_nsems; i++) {
                sem = &sma->sems[i];
                spin_lock(&sem->lock);
                spin_unlock(&sem->lock);
        }
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
        if (sma->complex_count) {
                /* Complex ops are sleeping.
                 * We must stay in complex mode
                 */
                return;
        }
        if (sma->use_global_lock == 1) {
                /* See SEM_BARRIER_1 for purpose/pairing */
                smp_store_release(&sma->use_global_lock, 0);
        } else {
                sma->use_global_lock--;
        }
}

#define SEM_GLOBAL_LOCK (-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                           int nsops)
{
        struct sem *sem;
        int idx;

        if (nsops != 1) {
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);

                /* Prevent parallel simple ops */
                complexmode_enter(sma);
                return SEM_GLOBAL_LOCK;
        }

        /*
         * Only one semaphore affected - try to optimize locking.
         * Optimized locking is possible if no complex operation
         * is either enqueued or processed right now.
         *
         * Both facts are tracked by use_global_lock.
         */
        idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
        sem = &sma->sems[idx];

        /*
         * Initial check for use_global_lock. Just an optimization,
         * no locking, no memory barrier.
         */
        if (!sma->use_global_lock) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);

                /* see SEM_BARRIER_1 for purpose/pairing */
                if (!smp_load_acquire(&sma->use_global_lock)) {
                        /* fast path successful! */
                        return sops->sem_num;
                }
                spin_unlock(&sem->lock);
        }

        /* slow path: acquire the full lock */
        ipc_lock_object(&sma->sem_perm);

        if (sma->use_global_lock == 0) {
                /*
                 * The use_global_lock mode ended while we waited for
                 * sma->sem_perm.lock. Thus we must switch to locking
                 * with sem->lock.
                 * Unlike in the fast path, there is no need to recheck
                 * sma->use_global_lock after we have acquired sem->lock:
                 * We own sma->sem_perm.lock, thus use_global_lock cannot
                 * change.
                 */
                spin_lock(&sem->lock);

                ipc_unlock_object(&sma->sem_perm);
                return sops->sem_num;
        } else {
                /*
                 * Not a false alarm, thus continue to use the global lock
                 * mode. No need for complexmode_enter(), this was done by
                 * the caller that has set use_global_lock to non-zero.
                 */
                return SEM_GLOBAL_LOCK;
        }
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
        if (locknum == SEM_GLOBAL_LOCK) {
                unmerge_queues(sma);
                complexmode_tryleave(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = &sma->sems[locknum];

                spin_unlock(&sem->lock);
        }
}
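
/*
 * Sketch of the calling pattern used throughout this file (illustrative):
 *
 *      rcu_read_lock();
 *      sma = sem_obtain_object_check(ns, semid);
 *      ... permission and validity checks ...
 *      locknum = sem_lock(sma, sops, nsops);
 *      ... operate on the array ...
 *      sem_unlock(sma, locknum);
 *      rcu_read_unlock();
 *      wake_up_q(&wake_q);     // wakeups happen after all locks are dropped
 */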

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
        sem_lock(sma, NULL, -1);
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
        struct sem_array *sma;

        if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
                return NULL;

        sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
        if (unlikely(!sma))
                return NULL;

        return sma;
}
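
/*
 * Note on the bound above (illustrative): it guarantees that
 * sizeof(*sma) + nsems * sizeof(struct sem) stays within INT_MAX, so the
 * struct_size() computation handed to kvzalloc() cannot overflow.
 */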

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
        int retval;
        struct sem_array *sma;
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
        int i;

        if (!nsems)
                return -EINVAL;
        if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;

        sma = sem_alloc(nsems);
        if (!sma)
                return -ENOMEM;

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(&sma->sem_perm);
        if (retval) {
                kvfree(sma);
                return retval;
        }

        for (i = 0; i < nsems; i++) {
                INIT_LIST_HEAD(&sma->sems[i].pending_alter);
                INIT_LIST_HEAD(&sma->sems[i].pending_const);
                spin_lock_init(&sma->sems[i].lock);
        }

        sma->complex_count = 0;
        sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = ktime_get_real_seconds();

        /* ipc_addid() locks sma upon success. */
        retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (retval < 0) {
                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                return retval;
        }
        ns->used_sems += nsems;

        sem_unlock(sma, -1);
        rcu_read_unlock();

        return sma->sem_perm.id;
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        if (params->u.nsems > sma->sem_nsems)
                return -EINVAL;

        return 0;
}

long ksys_semget(key_t key, int nsems, int semflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops sem_ops = {
                .getnew = newary,
                .associate = security_sem_associate,
                .more_checks = sem_more_checks,
        };
        struct ipc_params sem_params;

        ns = current->nsproxy->ipc_ns;

        if (nsems < 0 || nsems > ns->sc_semmsl)
                return -EINVAL;

        sem_params.key = key;
        sem_params.flg = semflg;
        sem_params.u.nsems = nsems;

        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
        return ksys_semget(key, nsems, semflg);
}

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks depends on the value of the semaphore
 * operation (sem_op):
 *  (1) >0 never blocks.
 *  (2) 0 (wait-for-zero operation): blocks if semval is non-zero.
 *  (3) <0 blocks if attempting to decrement semval to a value smaller
 *      than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct pid *pid;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                        un->semadj[sop->sem_num] = undo;
                }

                curr->semval = result;
        }

        sop--;
        pid = q->pid;
        while (sop >= sops) {
                ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
                sop--;
        }

        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        q->blocking = sop;

        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sem_op = sop->sem_op;
                sma->sems[sop->sem_num].semval -= sem_op;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] += sem_op;
                sop--;
        }

        return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        if (unlikely(q->dupsop))
                return perform_atomic_semop_slow(sma, q);

        /*
         * We scan the semaphore set twice, first to ensure that the entire
         * operation can succeed, therefore avoiding any pointless writes
         * to shared memory and having to undo such changes in order to block
         * until the operations can go through.
         */
        for (sop = sops; sop < sops + nsops; sop++) {
                int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

                curr = &sma->sems[idx];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block; /* wait-for-zero */

                result += sem_op;
                if (result < 0)
                        goto would_block;

                if (result > SEMVMX)
                        return -ERANGE;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                return -ERANGE;
                }
        }

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = &sma->sems[sop->sem_num];
                sem_op = sop->sem_op;
                result = curr->semval;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;

                        un->semadj[sop->sem_num] = undo;
                }
                curr->semval += sem_op;
                ipc_update_pid(&curr->sempid, q->pid);
        }

        return 0;

would_block:
        q->blocking = sop;
        return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
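
/*
 * Why dupsop falls back to the slow path (illustrative): one semop() with
 * two sops on the same semaphore, e.g. { {0, -1, 0}, {0, -1, 0} } and
 * semval == 1, would pass the read-only validation scan above (each sop
 * sees semval == 1), yet applying both would drive semval to -1.
 * perform_atomic_semop_slow() applies each sop as it validates and rolls
 * back on failure, which handles duplicate sem_nums correctly.
 */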

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
                                             struct wake_q_head *wake_q)
{
        struct task_struct *sleeper;

        sleeper = get_task_struct(q->sleeper);

        /* see SEM_BARRIER_2 for purpose/pairing */
        smp_store_release(&q->status, error);

        wake_q_add_safe(wake_q, sleeper);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
        list_del(&q->list);
        if (q->nsops > 1)
                sma->complex_count--;
}
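
/*
 * Note (illustrative): the task reference above is taken before the RELEASE
 * store to q->status. As soon as the status is visible, the sleeper may
 * return and even exit; the reference keeps its task_struct alive and is
 * consumed by wake_q_add_safe()/wake_up_q().
 */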

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
        /* pending complex alter operations are too difficult to analyse */
        if (!list_empty(&sma->pending_alter))
                return 1;

        /* we were a sleeping complex operation. Too difficult */
        if (q->nsops > 1)
                return 1;

        /* It is impossible that someone waits for the new value:
         * - complex operations always restart.
         * - wait-for-zero are handled separately.
         * - q is a previously sleeping simple operation that
         *   altered the array. It must be a decrement, because
         *   simple increments never sleep.
         * - If there are older (higher priority) decrements
         *   in the queue, then they have observed the original
         *   semval value and couldn't proceed. The operation
         *   decreased the value further - thus they won't proceed
         *   either.
         */
        return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
                          struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_const;
        else
                pending_list = &sma->sems[semnum].pending_const;

        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error = perform_atomic_semop(sma, q);

                if (error > 0)
                        continue;
                /* operation completed, remove from queue & wakeup */
                unlink_queue(sma, q);

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (error == 0)
                        semop_completed = 1;
        }

        return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
                                int nsops, struct wake_q_head *wake_q)
{
        int i;
        int semop_completed = 0;
        int got_zero = 0;

        /* first: the per-semaphore queues, if known */
        if (sops) {
                for (i = 0; i < nsops; i++) {
                        int num = sops[i].sem_num;

                        if (sma->sems[num].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, num, wake_q);
                        }
                }
        } else {
                /*
                 * No sops means modified semaphores not known.
                 * Assume all were changed.
                 */
                for (i = 0; i < sma->sem_nsems; i++) {
                        if (sma->sems[i].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, i, wake_q);
                        }
                }
        }
        /*
         * If one of the modified semaphores got 0,
         * then check the global queue, too.
         */
        if (got_zero)
                semop_completed |= wake_const_ops(sma, -1, wake_q);

        return semop_completed;
}

/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
        struct sem_queue *q, *tmp;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_alter;
        else
                pending_list = &sma->sems[semnum].pending_alter;

again:
        list_for_each_entry_safe(q, tmp, pending_list, list) {
                int error, restart;

                /* If we are scanning the single sop, per-semaphore list of
                 * one semaphore and that semaphore is 0, then it is not
                 * necessary to scan further: simple increments
                 * that affect only one entry succeed immediately and cannot
                 * be in the per semaphore pending queue, and decrements
                 * cannot be successful if the value is already 0.
                 */
                if (semnum != -1 && sma->sems[semnum].semval == 0)
                        break;

                error = perform_atomic_semop(sma, q);

                /* Does q->sleeper still need to sleep? */
                if (error > 0)
                        continue;

                unlink_queue(sma, q);

                if (error) {
                        restart = 0;
                } else {
                        semop_completed = 1;
                        do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
                        restart = check_restart(sma, q);
                }

                wake_up_sem_queue_prepare(q, error, wake_q);
                if (restart)
                        goto again;
        }
        return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
        if (sops == NULL) {
                sma->sems[0].sem_otime = ktime_get_real_seconds();
        } else {
                sma->sems[sops[0].sem_num].sem_otime =
                                                ktime_get_real_seconds();
        }
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
                            int otime, struct wake_q_head *wake_q)
{
        int i;

        otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

        if (!list_empty(&sma->pending_alter)) {
                /* semaphore array uses the global queue - just process it. */
                otime |= update_queue(sma, -1, wake_q);
        } else {
                if (!sops) {
                        /*
                         * No sops, thus the modified semaphores are not
                         * known. Check all.
                         */
                        for (i = 0; i < sma->sem_nsems; i++)
                                otime |= update_queue(sma, i, wake_q);
                } else {
                        /*
                         * Check the semaphores that were increased:
                         * - No complex ops, thus all sleeping ops are
                         *   decreases.
                         * - if we decreased the value, then any sleeping
                         *   semaphore ops won't be able to run: If the
                         *   previous value was too small, then the new
                         *   value will be too small, too.
                         */
                        for (i = 0; i < nsops; i++) {
                                if (sops[i].sem_op > 0) {
                                        otime |= update_queue(sma,
                                                        sops[i].sem_num, wake_q);
                                }
                        }
                }
        }
        if (otime)
                set_semotime(sma, sops);
}
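
/*
 * Worked example (illustrative): a semop() that performed { +1 on sem 2,
 * -1 on sem 0 } only needs update_queue() on sem 2. Sleepers on sem 0 are
 * decrements, and sem 0 just got smaller: if they could not proceed before,
 * they cannot proceed now.
 */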

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
                     bool count_zero)
{
        struct sembuf *sop = q->blocking;

        /*
         * Linux always (since 0.99.10) reported a task as sleeping on all
         * semaphores. This violates SUS, therefore it was changed to the
         * standard compliant behavior.
         * Give the administrators a chance to notice that an application
         * might misbehave because it relies on the Linux behavior.
         */
        pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
                        current->comm, task_pid_nr(current));

        if (sop->sem_num != semnum)
                return 0;

        if (count_zero && sop->sem_op == 0)
                return 1;
        if (!count_zero && sop->sem_op < 0)
                return 1;

        return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
                        bool count_zero)
{
        struct list_head *l;
        struct sem_queue *q;
        int semcnt;

        semcnt = 0;
        /* First: check the simple operations. They are easy to evaluate */
        if (count_zero)
                l = &sma->sems[semnum].pending_const;
        else
                l = &sma->sems[semnum].pending_alter;

        list_for_each_entry(q, l, list) {
                /* all tasks on a per-semaphore list sleep on exactly
                 * that semaphore
                 */
                semcnt++;
        }

        /* Then: check the complex operations. */
        list_for_each_entry(q, &sma->pending_alter, list) {
                semcnt += check_qop(sma, semnum, q, count_zero);
        }
        if (count_zero) {
                list_for_each_entry(q, &sma->pending_const, list) {
                        semcnt += check_qop(sma, semnum, q, count_zero);
                }
        }
        return semcnt;
}
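
/*
 * Illustrative: per SUS, a task blocked in semop() with the complex
 * operation { {1, -1, 0}, {3, -1, 0} } whose first unsatisfiable sop is on
 * semaphore 1 contributes to GETNCNT of semaphore 1 only - check_qop()
 * looks solely at q->blocking, not at every sop in the request.
 */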

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct sem_undo *un, *tu;
        struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
        int i;
        DEFINE_WAKE_Q(wake_q);

        /* Free the existing undo structures for this semaphore set. */
        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
                list_del(&un->list_id);
                spin_lock(&un->ulp->lock);
                un->semid = -1;
                list_del_rcu(&un->list_proc);
                spin_unlock(&un->ulp->lock);
                kfree_rcu(un, rcu);
        }

        /* Wake up all pending processes and let them fail with EIDRM. */
        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }

        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
        }
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = &sma->sems[i];

                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
                }
                ipc_update_pid(&sem->sempid, NULL);
        }

        /* Remove the semaphore set from the IDR */
        sem_rmid(ns, sma);
        sem_unlock(sma, -1);
        rcu_read_unlock();

        wake_up_q(&wake_q);
        ns->used_sems -= sma->sem_nsems;
        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime = in->sem_otime;
                out.sem_ctime = in->sem_ctime;
                out.sem_nsems = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static time64_t get_semotime(struct sem_array *sma)
{
        int i;
        time64_t res;

        res = sma->sems[0].sem_otime;
        for (i = 1; i < sma->sem_nsems; i++) {
                time64_t to = sma->sems[i].sem_otime;

                if (to > res)
                        res = to;
        }
        return res;
}

static int semctl_stat(struct ipc_namespace *ns, int semid,
                       int cmd, struct semid64_ds *semid64)
{
        struct sem_array *sma;
        time64_t semotime;
        int err;

        memset(semid64, 0, sizeof(*semid64));

        rcu_read_lock();
        if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
                sma = sem_obtain_object(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                sma = sem_obtain_object_check(ns, semid);
                if (IS_ERR(sma)) {
                        err = PTR_ERR(sma);
                        goto out_unlock;
                }
        }

        /* see comment for SHM_STAT_ANY */
        if (cmd == SEM_STAT_ANY)
                audit_ipc_obj(&sma->sem_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&sma->sem_perm);

        if (!ipc_valid_object(&sma->sem_perm)) {
                ipc_unlock_object(&sma->sem_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
        semotime = get_semotime(sma);
        semid64->sem_otime = semotime;
        semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
        semid64->sem_otime_high = semotime >> 32;
        semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
        semid64->sem_nsems = sma->sem_nsems;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = sma->sem_perm.id;
        }
        ipc_unlock_object(&sma->sem_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int semctl_info(struct ipc_namespace *ns, int semid,
                       int cmd, void __user *p)
{
        struct seminfo seminfo;
        int max_idx;
        int err;

        err = security_sem_semctl(NULL, cmd);
        if (err)
                return err;

        memset(&seminfo, 0, sizeof(seminfo));
        seminfo.semmni = ns->sc_semmni;
        seminfo.semmns = ns->sc_semmns;
        seminfo.semmsl = ns->sc_semmsl;
        seminfo.semopm = ns->sc_semopm;
        seminfo.semvmx = SEMVMX;
        seminfo.semmnu = SEMMNU;
        seminfo.semmap = SEMMAP;
        seminfo.semume = SEMUME;
        down_read(&sem_ids(ns).rwsem);
        if (cmd == SEM_INFO) {
                seminfo.semusz = sem_ids(ns).in_use;
                seminfo.semaem = ns->used_sems;
        } else {
                seminfo.semusz = SEMUSZ;
                seminfo.semaem = SEMAEM;
        }
        max_idx = ipc_get_maxidx(&sem_ids(ns));
        up_read(&sem_ids(ns).rwsem);
        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
                return -EFAULT;
        return (max_idx < 0) ? 0 : max_idx;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
                         int val)
{
        struct sem_undo *un;
        struct sem_array *sma;
        struct sem *curr;
        int err;
        DEFINE_WAKE_Q(wake_q);

        if (val > SEMVMX || val < 0)
                return -ERANGE;

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        if (semnum < 0 || semnum >= sma->sem_nsems) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
                rcu_read_unlock();
                return -EACCES;
        }

        err = security_sem_semctl(&sma->sem_perm, SETVAL);
        if (err) {
                rcu_read_unlock();
                return -EACCES;
        }

        sem_lock(sma, NULL, -1);

        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                return -EIDRM;
        }

        semnum = array_index_nospec(semnum, sma->sem_nsems);
        curr = &sma->sems[semnum];

        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry(un, &sma->list_id, list_id)
                un->semadj[semnum] = 0;

        curr->semval = val;
        ipc_update_pid(&curr->sempid, task_tgid(current));
        sma->sem_ctime = ktime_get_real_seconds();
        /* maybe some queued-up processes were waiting for this */
        do_smart_update(sma, NULL, 0, 0, &wake_q);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_q(&wake_q);
        return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                       int cmd, void __user *p)
{
        struct sem_array *sma;
        struct sem *curr;
        int err, nsems;
        ushort fast_sem_io[SEMMSL_FAST];
        ushort *sem_io = fast_sem_io;
        DEFINE_WAKE_Q(wake_q);

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        nsems = sma->sem_nsems;

        err = -EACCES;
        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
                goto out_rcu_wakeup;

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_rcu_wakeup;

        err = -EACCES;
        switch (cmd) {
        case GETALL:
        {
                ushort __user *array = p;
                int i;

                sem_lock(sma, NULL, -1);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }
                if (nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(&sma->sem_perm)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
                                                GFP_KERNEL);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                return -ENOMEM;
                        }

                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (!ipc_valid_object(&sma->sem_perm)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
                        sem_io[i] = sma->sems[i].semval;
                sem_unlock(sma, -1);
                rcu_read_unlock();
                err = 0;
                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
                        err = -EFAULT;
                goto out_free;
        }
        case SETALL:
        {
                int i;
                struct sem_undo *un;

                if (!ipc_rcu_getref(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_rcu_wakeup;
                }
                rcu_read_unlock();

                if (nsems > SEMMSL_FAST) {
                        sem_io = kvmalloc_array(nsems, sizeof(ushort),
                                                GFP_KERNEL);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                return -ENOMEM;
                        }
                }

                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
                        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }

                for (i = 0; i < nsems; i++) {
                        sma->sems[i].semval = sem_io[i];
                        ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
                }

                ipc_assert_locked_object(&sma->sem_perm);
                list_for_each_entry(un, &sma->list_id, list_id) {
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
                }
                sma->sem_ctime = ktime_get_real_seconds();
                /* maybe some queued-up processes were waiting for this */
                do_smart_update(sma, NULL, 0, 0, &wake_q);
                err = 0;
                goto out_unlock;
        }
        /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
        }
        err = -EINVAL;
        if (semnum < 0 || semnum >= nsems)
                goto out_rcu_wakeup;

        sem_lock(sma, NULL, -1);
        if (!ipc_valid_object(&sma->sem_perm)) {
                err = -EIDRM;
                goto out_unlock;
        }

        semnum = array_index_nospec(semnum, nsems);
        curr = &sma->sems[semnum];

        switch (cmd) {
        case GETVAL:
                err = curr->semval;
                goto out_unlock;
        case GETPID:
                err = pid_vnr(curr->sempid);
                goto out_unlock;
        case GETNCNT:
                err = count_semcnt(sma, semnum, 0);
                goto out_unlock;
        case GETZCNT:
                err = count_semcnt(sma, semnum, 1);
                goto out_unlock;
        }

out_unlock:
        sem_unlock(sma, -1);
out_rcu_wakeup:
        rcu_read_unlock();
        wake_up_q(&wake_q);
out_free:
        if (sem_io != fast_sem_io)
                kvfree(sem_io);
        return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct semid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->sem_perm.uid = tbuf_old.sem_perm.uid;
                out->sem_perm.gid = tbuf_old.sem_perm.gid;
                out->sem_perm.mode = tbuf_old.sem_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
                       int cmd, struct semid64_ds *semid64)
{
        struct sem_array *sma;
        int err;
        struct kern_ipc_perm *ipcp;

        down_write(&sem_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
                                   &semid64->sem_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        sma = container_of(ipcp, struct sem_array, sem_perm);

        err = security_sem_semctl(&sma->sem_perm, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                sem_lock(sma, NULL, -1);
                /* freeary unlocks the ipc object and rcu */
                freeary(ns, ipcp);
                goto out_up;
        case IPC_SET:
                sem_lock(sma, NULL, -1);
                err = ipc_update_perm(&semid64->sem_perm, ipcp);
                if (err)
                        goto out_unlock0;
                sma->sem_ctime = ktime_get_real_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        sem_unlock(sma, -1);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&sem_ids(ns).rwsem);
        return err;
}
275f2214 1650static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
1da177e4 1651{
e3893534 1652 struct ipc_namespace *ns;
e1fd1f49 1653 void __user *p = (void __user *)arg;
45a4a64a
AV
1654 struct semid64_ds semid64;
1655 int err;
1da177e4
LT
1656
1657 if (semid < 0)
1658 return -EINVAL;
1659
e3893534 1660 ns = current->nsproxy->ipc_ns;
1da177e4 1661
239521f3 1662 switch (cmd) {
1da177e4
LT
1663 case IPC_INFO:
1664 case SEM_INFO:
45a4a64a 1665 return semctl_info(ns, semid, cmd, p);
4b9fcb0e 1666 case IPC_STAT:
1da177e4 1667 case SEM_STAT:
a280d6dc 1668 case SEM_STAT_ANY:
45a4a64a
AV
1669 err = semctl_stat(ns, semid, cmd, &semid64);
1670 if (err < 0)
1671 return err;
1672 if (copy_semid_to_user(p, &semid64, version))
1673 err = -EFAULT;
1674 return err;
1da177e4
LT
1675 case GETALL:
1676 case GETVAL:
1677 case GETPID:
1678 case GETNCNT:
1679 case GETZCNT:
1da177e4 1680 case SETALL:
e1fd1f49 1681 return semctl_main(ns, semid, semnum, cmd, p);
45a4a64a
AV
1682 case SETVAL: {
1683 int val;
1684#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1685 /* big-endian 64bit */
1686 val = arg >> 32;
1687#else
1688 /* 32bit or little-endian 64bit */
1689 val = arg;
1690#endif
1691 return semctl_setval(ns, semid, semnum, val);
1692 }
1da177e4 1693 case IPC_SET:
45a4a64a
AV
1694 if (copy_semid_from_user(&semid64, p, version))
1695 return -EFAULT;
df561f66 1696 fallthrough;
45a4a64a
AV
1697 case IPC_RMID:
1698 return semctl_down(ns, semid, cmd, &semid64);
1da177e4
LT
1699 default:
1700 return -EINVAL;
1701 }
1702}
1703
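A short user-space sketch of the SETVAL/GETVAL commands dispatched above; for SETVAL the new value travels inside the 'arg' word itself, which is why the big-endian 64-bit extraction is needed (set_and_read() is an illustrative helper, not part of this file):

#include <sys/ipc.h>
#include <sys/sem.h>

union semun {				/* per semctl(2), the caller defines this */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int set_and_read(int semid)
{
	union semun arg = { .val = 3 };

	if (semctl(semid, 0, SETVAL, arg) < 0)	/* ends up in semctl_setval() */
		return -1;
	return semctl(semid, 0, GETVAL);	/* returns 3 on success */
}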
d969c6fa
DB
1704SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1705{
275f2214 1706 return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
d969c6fa
DB
1707}
1708
275f2214
AB
1709#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1710long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
1711{
1712 int version = ipc_parse_version(&cmd);
1713
1714 return ksys_semctl(semid, semnum, cmd, arg, version);
1715}
1716
1717SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1718{
1719 return ksys_old_semctl(semid, semnum, cmd, arg);
1720}
1721#endif
1722
c0ebccb6
AV
1723#ifdef CONFIG_COMPAT
1724
1725struct compat_semid_ds {
1726 struct compat_ipc_perm sem_perm;
9afc5eee
AB
1727 old_time32_t sem_otime;
1728 old_time32_t sem_ctime;
c0ebccb6
AV
1729 compat_uptr_t sem_base;
1730 compat_uptr_t sem_pending;
1731 compat_uptr_t sem_pending_last;
1732 compat_uptr_t undo;
1733 unsigned short sem_nsems;
1734};
1735
1736static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1737 int version)
1738{
1739 memset(out, 0, sizeof(*out));
1740 if (version == IPC_64) {
6aa211e8 1741 struct compat_semid64_ds __user *p = buf;
c0ebccb6
AV
1742 return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1743 } else {
6aa211e8 1744 struct compat_semid_ds __user *p = buf;
c0ebccb6
AV
1745 return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1746 }
1747}
1748
1749static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1750 int version)
1751{
1752 if (version == IPC_64) {
1753 struct compat_semid64_ds v;
1754 memset(&v, 0, sizeof(v));
1755 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
c2ab975c
AB
1756 v.sem_otime = lower_32_bits(in->sem_otime);
1757 v.sem_otime_high = upper_32_bits(in->sem_otime);
1758 v.sem_ctime = lower_32_bits(in->sem_ctime);
1759 v.sem_ctime_high = upper_32_bits(in->sem_ctime);
c0ebccb6
AV
1760 v.sem_nsems = in->sem_nsems;
1761 return copy_to_user(buf, &v, sizeof(v));
1762 } else {
1763 struct compat_semid_ds v;
1764 memset(&v, 0, sizeof(v));
1765 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1766 v.sem_otime = in->sem_otime;
1767 v.sem_ctime = in->sem_ctime;
1768 v.sem_nsems = in->sem_nsems;
1769 return copy_to_user(buf, &v, sizeof(v));
1770 }
1771}
1772
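The sem_otime/sem_otime_high split above keeps 32-bit (compat) callers usable past 2038; a reader reassembles the 64-bit timestamp roughly as follows (a sketch using the same lower/upper 32-bit convention; rebuild_time64() is an illustrative name):

static inline long long rebuild_time64(unsigned int lo, unsigned int hi)
{
	return ((long long)hi << 32) | lo;	/* inverse of lower_32_bits()/upper_32_bits() */
}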
275f2214 1773static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
c0ebccb6
AV
1774{
1775 void __user *p = compat_ptr(arg);
1776 struct ipc_namespace *ns;
1777 struct semid64_ds semid64;
c0ebccb6
AV
1778 int err;
1779
1780 ns = current->nsproxy->ipc_ns;
1781
1782 if (semid < 0)
1783 return -EINVAL;
1784
1785 switch (cmd & (~IPC_64)) {
1786 case IPC_INFO:
1787 case SEM_INFO:
1788 return semctl_info(ns, semid, cmd, p);
1789 case IPC_STAT:
1790 case SEM_STAT:
a280d6dc 1791 case SEM_STAT_ANY:
c0ebccb6
AV
1792 err = semctl_stat(ns, semid, cmd, &semid64);
1793 if (err < 0)
1794 return err;
1795 if (copy_compat_semid_to_user(p, &semid64, version))
1796 err = -EFAULT;
1797 return err;
1798 case GETVAL:
1799 case GETPID:
1800 case GETNCNT:
1801 case GETZCNT:
1802 case GETALL:
1da177e4 1803 case SETALL:
e1fd1f49
AV
1804 return semctl_main(ns, semid, semnum, cmd, p);
1805 case SETVAL:
1806 return semctl_setval(ns, semid, semnum, arg);
1da177e4 1807 case IPC_SET:
c0ebccb6
AV
1808 if (copy_compat_semid_from_user(&semid64, p, version))
1809 return -EFAULT;
df561f66 1810 fallthrough;
c0ebccb6
AV
1811 case IPC_RMID:
1812 return semctl_down(ns, semid, cmd, &semid64);
1da177e4
LT
1813 default:
1814 return -EINVAL;
1815 }
1816}
d969c6fa
DB
1817
1818COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
1819{
275f2214 1820 return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
d969c6fa 1821}
275f2214
AB
1822
1823#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1824long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
1825{
1826 int version = compat_ipc_parse_version(&cmd);
1827
1828 return compat_ksys_semctl(semid, semnum, cmd, arg, version);
1829}
1830
1831COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
1832{
1833 return compat_ksys_old_semctl(semid, semnum, cmd, arg);
1834}
1835#endif
c0ebccb6 1836#endif
1da177e4 1837
1da177e4
LT
1838/* If the task doesn't already have an undo_list, then allocate one
1839 * here. We guarantee there is only one thread using this undo list,
1840 * and current is THE ONE
1841 *
1842 * If this allocation and assignment succeeds, but later
1843 * portions of this code fail, there is no need to free the sem_undo_list.
1844 * Just let it stay associated with the task, and it'll be freed later
1845 * at exit time.
1846 *
1847 * This can block, so callers must hold no locks.
1848 */
1849static inline int get_undo_list(struct sem_undo_list **undo_listp)
1850{
1851 struct sem_undo_list *undo_list;
1da177e4
LT
1852
1853 undo_list = current->sysvsem.undo_list;
1854 if (!undo_list) {
2453a306 1855 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1da177e4
LT
1856 if (undo_list == NULL)
1857 return -ENOMEM;
00a5dfdb 1858 spin_lock_init(&undo_list->lock);
f74370b8 1859 refcount_set(&undo_list->refcnt, 1);
4daa28f6
MS
1860 INIT_LIST_HEAD(&undo_list->list_proc);
1861
1da177e4
LT
1862 current->sysvsem.undo_list = undo_list;
1863 }
1864 *undo_listp = undo_list;
1865 return 0;
1866}
1867
bf17bb71 1868static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1da177e4 1869{
bf17bb71 1870 struct sem_undo *un;
4daa28f6 1871
984035ad
JFG
1872 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1873 spin_is_locked(&ulp->lock)) {
bf17bb71
NP
1874 if (un->semid == semid)
1875 return un;
1da177e4 1876 }
4daa28f6 1877 return NULL;
1da177e4
LT
1878}
1879
bf17bb71
NP
1880static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1881{
1882 struct sem_undo *un;
1883
239521f3 1884 assert_spin_locked(&ulp->lock);
bf17bb71
NP
1885
1886 un = __lookup_undo(ulp, semid);
1887 if (un) {
1888 list_del_rcu(&un->list_proc);
1889 list_add_rcu(&un->list_proc, &ulp->list_proc);
1890 }
1891 return un;
1892}
1893
4daa28f6 1894/**
8001c858 1895 * find_alloc_undo - lookup (and if not present create) undo array
4daa28f6
MS
1896 * @ns: namespace
1897 * @semid: semaphore array id
1898 *
1899 * The function looks up (and if not present creates) the undo structure.
1900 * The size of the undo structure depends on the size of the semaphore
1901 * array, thus the alloc path is not that straightforward.
380af1b3
MS
1902 * Lifetime rules: sem_undo is rcu-protected; on success, the function
1903 * returns with rcu_read_lock() held.
4daa28f6
MS
1904 */
1905static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1da177e4
LT
1906{
1907 struct sem_array *sma;
1908 struct sem_undo_list *ulp;
1909 struct sem_undo *un, *new;
6062a8dc 1910 int nsems, error;
1da177e4
LT
1911
1912 error = get_undo_list(&ulp);
1913 if (error)
1914 return ERR_PTR(error);
1915
380af1b3 1916 rcu_read_lock();
c530c6ac 1917 spin_lock(&ulp->lock);
1da177e4 1918 un = lookup_undo(ulp, semid);
c530c6ac 1919 spin_unlock(&ulp->lock);
239521f3 1920 if (likely(un != NULL))
1da177e4
LT
1921 goto out;
1922
1923 /* no undo structure around - allocate one. */
4daa28f6 1924 /* step 1: figure out the size of the semaphore array */
16df3674
DB
1925 sma = sem_obtain_object_check(ns, semid);
1926 if (IS_ERR(sma)) {
1927 rcu_read_unlock();
4de85cd6 1928 return ERR_CAST(sma);
16df3674 1929 }
023a5355 1930
1da177e4 1931 nsems = sma->sem_nsems;
dba4cdd3 1932 if (!ipc_rcu_getref(&sma->sem_perm)) {
6062a8dc
RR
1933 rcu_read_unlock();
1934 un = ERR_PTR(-EIDRM);
1935 goto out;
1936 }
16df3674 1937 rcu_read_unlock();
1da177e4 1938
4daa28f6 1939 /* step 2: allocate new undo structure */
4668edc3 1940 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1da177e4 1941 if (!new) {
dba4cdd3 1942 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1da177e4
LT
1943 return ERR_PTR(-ENOMEM);
1944 }
1da177e4 1945
380af1b3 1946 /* step 3: Acquire the lock on semaphore array */
4091fd94 1947 rcu_read_lock();
6ff37972 1948 sem_lock_and_putref(sma);
0f3d2b01 1949 if (!ipc_valid_object(&sma->sem_perm)) {
6062a8dc 1950 sem_unlock(sma, -1);
6d49dab8 1951 rcu_read_unlock();
1da177e4
LT
1952 kfree(new);
1953 un = ERR_PTR(-EIDRM);
1954 goto out;
1955 }
380af1b3
MS
1956 spin_lock(&ulp->lock);
1957
1958 /*
1959 * step 4: check for races: did someone else allocate the undo struct?
1960 */
1961 un = lookup_undo(ulp, semid);
1962 if (un) {
1963 kfree(new);
1964 goto success;
1965 }
4daa28f6
MS
1966 /* step 5: initialize & link new undo structure */
1967 new->semadj = (short *) &new[1];
380af1b3 1968 new->ulp = ulp;
4daa28f6
MS
1969 new->semid = semid;
1970 assert_spin_locked(&ulp->lock);
380af1b3 1971 list_add_rcu(&new->list_proc, &ulp->list_proc);
cf9d5d78 1972 ipc_assert_locked_object(&sma->sem_perm);
4daa28f6 1973 list_add(&new->list_id, &sma->list_id);
380af1b3 1974 un = new;
4daa28f6 1975
380af1b3 1976success:
c530c6ac 1977 spin_unlock(&ulp->lock);
6062a8dc 1978 sem_unlock(sma, -1);
1da177e4
LT
1979out:
1980 return un;
1981}
1982
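A small user-space sketch of the single-allocation layout find_alloc_undo() uses for the new undo structure (undo_sketch/undo_alloc are illustrative names, not kernel identifiers): the per-semaphore adjustment array is carved out of the same allocation, directly behind the header, so a single free releases both.

#include <stdlib.h>

struct undo_sketch {
	int semid;
	short *semadj;				/* points just past the struct */
};

static struct undo_sketch *undo_alloc(int nsems)
{
	struct undo_sketch *u;

	u = calloc(1, sizeof(*u) + sizeof(short) * nsems);
	if (!u)
		return NULL;
	u->semadj = (short *)&u[1];		/* trailing, zero-initialized array */
	u->semid = -1;
	return u;
}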
44ee4546 1983static long do_semtimedop(int semid, struct sembuf __user *tsops,
3ef56dc2 1984 unsigned nsops, const struct timespec64 *timeout)
1da177e4
LT
1985{
1986 int error = -EINVAL;
1987 struct sem_array *sma;
1988 struct sembuf fast_sops[SEMOPM_FAST];
239521f3 1989 struct sembuf *sops = fast_sops, *sop;
1da177e4 1990 struct sem_undo *un;
4ce33ec2
DB
1991 int max, locknum;
1992 bool undos = false, alter = false, dupsop = false;
1da177e4 1993 struct sem_queue queue;
4ce33ec2 1994 unsigned long dup = 0, jiffies_left = 0;
e3893534
KK
1995 struct ipc_namespace *ns;
1996
1997 ns = current->nsproxy->ipc_ns;
1da177e4
LT
1998
1999 if (nsops < 1 || semid < 0)
2000 return -EINVAL;
e3893534 2001 if (nsops > ns->sc_semopm)
1da177e4 2002 return -E2BIG;
239521f3 2003 if (nsops > SEMOPM_FAST) {
344476e1 2004 sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
239521f3 2005 if (sops == NULL)
1da177e4
LT
2006 return -ENOMEM;
2007 }
4ce33ec2 2008
239521f3
MS
2009 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2010 error = -EFAULT;
1da177e4
LT
2011 goto out_free;
2012 }
4ce33ec2 2013
1da177e4 2014 if (timeout) {
44ee4546
AV
2015 if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
2016 timeout->tv_nsec >= 1000000000L) {
1da177e4
LT
2017 error = -EINVAL;
2018 goto out_free;
2019 }
3ef56dc2 2020 jiffies_left = timespec64_to_jiffies(timeout);
1da177e4 2021 }
4ce33ec2 2022
1da177e4
LT
2023 max = 0;
2024 for (sop = sops; sop < sops + nsops; sop++) {
4ce33ec2
DB
2025 unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
2026
1da177e4
LT
2027 if (sop->sem_num >= max)
2028 max = sop->sem_num;
2029 if (sop->sem_flg & SEM_UNDO)
4ce33ec2
DB
2030 undos = true;
2031 if (dup & mask) {
2032 /*
2033 * There was a previous alter access that appears
2034 * to have accessed the same semaphore, thus use
2035 * the dupsop logic. "appears", because the detection
2036 * can only check % BITS_PER_LONG.
2037 */
2038 dupsop = true;
2039 }
2040 if (sop->sem_op != 0) {
2041 alter = true;
2042 dup |= mask;
2043 }
1da177e4 2044 }
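	/*
	 * Example of the conservative detection above: 'dup' has only
	 * BITS_PER_LONG bits, so on a 64-bit kernel sem_num 1 and
	 * sem_num 65 set the same bit, and two altering operations on
	 * those two distinct semaphores are flagged as dupsop as well.
	 * A false positive is harmless: queue.dupsop is only used to
	 * disable the duplicate-free fast path when the operations are
	 * applied by perform_atomic_semop().
	 */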
1da177e4 2045
1da177e4 2046 if (undos) {
6062a8dc 2047 /* On success, find_alloc_undo takes the rcu_read_lock */
4daa28f6 2048 un = find_alloc_undo(ns, semid);
1da177e4
LT
2049 if (IS_ERR(un)) {
2050 error = PTR_ERR(un);
2051 goto out_free;
2052 }
6062a8dc 2053 } else {
1da177e4 2054 un = NULL;
6062a8dc
RR
2055 rcu_read_lock();
2056 }
1da177e4 2057
16df3674 2058 sma = sem_obtain_object_check(ns, semid);
023a5355 2059 if (IS_ERR(sma)) {
6062a8dc 2060 rcu_read_unlock();
023a5355 2061 error = PTR_ERR(sma);
1da177e4 2062 goto out_free;
023a5355
ND
2063 }
2064
16df3674 2065 error = -EFBIG;
248e7357
DB
2066 if (max >= sma->sem_nsems) {
2067 rcu_read_unlock();
2068 goto out_free;
2069 }
16df3674
DB
2070
2071 error = -EACCES;
248e7357
DB
2072 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2073 rcu_read_unlock();
2074 goto out_free;
2075 }
16df3674 2076
aefad959 2077 error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
248e7357
DB
2078 if (error) {
2079 rcu_read_unlock();
2080 goto out_free;
2081 }
16df3674 2082
6e224f94
MS
2083 error = -EIDRM;
2084 locknum = sem_lock(sma, sops, nsops);
0f3d2b01
RA
2085 /*
2086 * We eventually might perform the following check in a lockless
2087 * fashion, considering ipc_valid_object() locking constraints.
2088 * If nsops == 1 and there is no contention for sem_perm.lock, then
2089 * only a per-semaphore lock is held and it's OK to proceed with the
2090 * check below. More details on the fine grained locking scheme
2091 * entangled here and why it's RMID race safe on comments at sem_lock()
2092 */
2093 if (!ipc_valid_object(&sma->sem_perm))
6e224f94 2094 goto out_unlock_free;
1da177e4 2095 /*
4daa28f6 2096 * semid identifiers are not unique - find_alloc_undo may have
1da177e4 2097 * allocated an undo structure; it was then invalidated by an RMID,
4daa28f6 2098 * and now a new array has received the same id. Check and fail.
25985edc 2099 * This case can be detected by checking un->semid. The existence of
380af1b3 2100 * "un" itself is guaranteed by rcu.
1da177e4 2101 */
6062a8dc
RR
2102 if (un && un->semid == -1)
2103 goto out_unlock_free;
4daa28f6 2104
d198cd6d
MS
2105 queue.sops = sops;
2106 queue.nsops = nsops;
2107 queue.undo = un;
51d6f263 2108 queue.pid = task_tgid(current);
d198cd6d 2109 queue.alter = alter;
4ce33ec2 2110 queue.dupsop = dupsop;
d198cd6d
MS
2111
2112 error = perform_atomic_semop(sma, &queue);
b1989a3d 2113 if (error == 0) { /* non-blocking successful path */
9ae949fa
DB
2114 DEFINE_WAKE_Q(wake_q);
2115
2116 /*
2117 * If the operation was successful, then do
0e8c6656
MS
2118 * the required updates.
2119 */
2120 if (alter)
9ae949fa 2121 do_smart_update(sma, sops, nsops, 1, &wake_q);
0e8c6656
MS
2122 else
2123 set_semotime(sma, sops);
9ae949fa
DB
2124
2125 sem_unlock(sma, locknum);
2126 rcu_read_unlock();
2127 wake_up_q(&wake_q);
2128
2129 goto out_free;
1da177e4 2130 }
9ae949fa 2131 if (error < 0) /* non-blocking error path */
0e8c6656 2132 goto out_unlock_free;
1da177e4 2133
9ae949fa
DB
2134 /*
2135 * We need to sleep on this operation, so we put the current
1da177e4
LT
2136 * task into the pending queue and go to sleep.
2137 */
b97e820f
MS
2138 if (nsops == 1) {
2139 struct sem *curr;
ec67aaa4
DB
2140 int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2141 curr = &sma->sems[idx];
b97e820f 2142
f269f40a
MS
2143 if (alter) {
2144 if (sma->complex_count) {
2145 list_add_tail(&queue.list,
2146 &sma->pending_alter);
2147 } else {
2148
2149 list_add_tail(&queue.list,
2150 &curr->pending_alter);
2151 }
2152 } else {
1a82e9e1 2153 list_add_tail(&queue.list, &curr->pending_const);
f269f40a 2154 }
b97e820f 2155 } else {
f269f40a
MS
2156 if (!sma->complex_count)
2157 merge_queues(sma);
2158
9f1bc2c9 2159 if (alter)
1a82e9e1 2160 list_add_tail(&queue.list, &sma->pending_alter);
9f1bc2c9 2161 else
1a82e9e1
MS
2162 list_add_tail(&queue.list, &sma->pending_const);
2163
b97e820f
MS
2164 sma->complex_count++;
2165 }
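	/*
	 * Where the entry was queued determines who scans it later:
	 * single-semaphore operations sit on that semaphore's own
	 * pending_alter/pending_const list, while multi-semaphore
	 * ("complex") operations go on the array-wide lists and bump
	 * complex_count, which in turn makes sem_lock() fall back to
	 * the whole-array lock.
	 */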
2166
b5fa01a2 2167 do {
8116b54e 2168 /* memory ordering ensured by the lock in sem_lock() */
f075faa3 2169 WRITE_ONCE(queue.status, -EINTR);
b5fa01a2 2170 queue.sleeper = current;
0b0577f6 2171
8116b54e 2172 /* memory ordering is ensured by the lock in sem_lock() */
b5fa01a2
DB
2173 __set_current_state(TASK_INTERRUPTIBLE);
2174 sem_unlock(sma, locknum);
2175 rcu_read_unlock();
1da177e4 2176
b5fa01a2
DB
2177 if (timeout)
2178 jiffies_left = schedule_timeout(jiffies_left);
2179 else
2180 schedule();
1da177e4 2181
9ae949fa 2182 /*
b5fa01a2
DB
 2183 * fastpath: the semop has completed, either successfully or
 2184 * not; from the syscall pov, which one it was is quite
 2185 * irrelevant to us at this point: we're done.
2186 *
2187 * We _do_ care, nonetheless, about being awoken by a signal or
2188 * spuriously. The queue.status is checked again in the
2189 * slowpath (aka after taking sem_lock), such that we can detect
2190 * scenarios where we were awakened externally, during the
2191 * window between wake_q_add() and wake_up_q().
c61284e9 2192 */
b5fa01a2
DB
2193 error = READ_ONCE(queue.status);
2194 if (error != -EINTR) {
8116b54e
MS
2195 /* see SEM_BARRIER_2 for purpose/pairing */
2196 smp_acquire__after_ctrl_dep();
b5fa01a2
DB
2197 goto out_free;
2198 }
d694ad62 2199
b5fa01a2 2200 rcu_read_lock();
c626bc46 2201 locknum = sem_lock(sma, sops, nsops);
1da177e4 2202
370b262c
DB
2203 if (!ipc_valid_object(&sma->sem_perm))
2204 goto out_unlock_free;
2205
8116b54e
MS
2206 /*
 2207 * No necessity for any barrier: we are protected by sem_lock().
2208 */
370b262c 2209 error = READ_ONCE(queue.status);
1da177e4 2210
b5fa01a2
DB
2211 /*
2212 * If queue.status != -EINTR we are woken up by another process.
2213 * Leave without unlink_queue(), but with sem_unlock().
2214 */
2215 if (error != -EINTR)
2216 goto out_unlock_free;
0b0577f6 2217
b5fa01a2
DB
2218 /*
2219 * If an interrupt occurred we have to clean up the queue.
2220 */
2221 if (timeout && jiffies_left == 0)
2222 error = -EAGAIN;
2223 } while (error == -EINTR && !signal_pending(current)); /* spurious */
0b0577f6 2224
b97e820f 2225 unlink_queue(sma, &queue);
1da177e4
LT
2226
2227out_unlock_free:
6062a8dc 2228 sem_unlock(sma, locknum);
6d49dab8 2229 rcu_read_unlock();
1da177e4 2230out_free:
239521f3 2231 if (sops != fast_sops)
e4243b80 2232 kvfree(sops);
1da177e4
LT
2233 return error;
2234}
2235
41f4f0e2 2236long ksys_semtimedop(int semid, struct sembuf __user *tsops,
21fc538d 2237 unsigned int nsops, const struct __kernel_timespec __user *timeout)
44ee4546
AV
2238{
2239 if (timeout) {
3ef56dc2
DD
2240 struct timespec64 ts;
2241 if (get_timespec64(&ts, timeout))
44ee4546
AV
2242 return -EFAULT;
2243 return do_semtimedop(semid, tsops, nsops, &ts);
2244 }
2245 return do_semtimedop(semid, tsops, nsops, NULL);
2246}
2247
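For reference, a minimal user-space use of the timeout path above (a sketch; semtimedop() needs _GNU_SOURCE with glibc, and the semaphore starts at 0 here, so the decrement must block until the timeout expires and fails with EAGAIN, matching the jiffies_left == 0 handling in do_semtimedop()):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <time.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

	if (semid < 0)
		return 1;
	if (semtimedop(semid, &op, 1, &timeout) < 0 && errno == EAGAIN)
		printf("timed out after ~1s, as expected\n");
	semctl(semid, 0, IPC_RMID);
	return 0;
}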
41f4f0e2 2248SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
21fc538d 2249 unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
41f4f0e2
DB
2250{
2251 return ksys_semtimedop(semid, tsops, nsops, timeout);
2252}
2253
b0d17578 2254#ifdef CONFIG_COMPAT_32BIT_TIME
41f4f0e2
DB
2255long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2256 unsigned int nsops,
9afc5eee 2257 const struct old_timespec32 __user *timeout)
44ee4546
AV
2258{
2259 if (timeout) {
3ef56dc2 2260 struct timespec64 ts;
9afc5eee 2261 if (get_old_timespec32(&ts, timeout))
44ee4546
AV
2262 return -EFAULT;
2263 return do_semtimedop(semid, tsems, nsops, &ts);
2264 }
2265 return do_semtimedop(semid, tsems, nsops, NULL);
2266}
41f4f0e2 2267
8dabe724 2268SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
41f4f0e2 2269 unsigned int, nsops,
9afc5eee 2270 const struct old_timespec32 __user *, timeout)
41f4f0e2
DB
2271{
2272 return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2273}
44ee4546
AV
2274#endif
2275
d5460c99
HC
2276SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2277 unsigned, nsops)
1da177e4 2278{
44ee4546 2279 return do_semtimedop(semid, tsops, nsops, NULL);
1da177e4
LT
2280}
2281
2282/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2283 * parent and child tasks.
1da177e4
LT
2284 */
2285
2286int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2287{
2288 struct sem_undo_list *undo_list;
2289 int error;
2290
2291 if (clone_flags & CLONE_SYSVSEM) {
2292 error = get_undo_list(&undo_list);
2293 if (error)
2294 return error;
f74370b8 2295 refcount_inc(&undo_list->refcnt);
1da177e4 2296 tsk->sysvsem.undo_list = undo_list;
46c0a8ca 2297 } else
1da177e4
LT
2298 tsk->sysvsem.undo_list = NULL;
2299
2300 return 0;
2301}
2302
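/*
 * Usage note: glibc's pthread_create() passes CLONE_SYSVSEM, so all
 * threads of a process share one undo list via the refcount above,
 * whereas fork() leaves the child's tsk->sysvsem.undo_list NULL and a
 * private list is allocated lazily on its first SEM_UNDO operation
 * (see get_undo_list()).
 */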
2303/*
2304 * add semadj values to semaphores, free undo structures.
2305 * undo structures are not freed when semaphore arrays are destroyed
2306 * so some of them may be out of date.
2307 * IMPLEMENTATION NOTE: There is some confusion over whether the
 2308 * set of adjustments should be applied atomically or not. That is,
 2309 * if we are attempting to decrement the semval, should we queue up
 2310 * and wait until we can do so legally?
2311 * The original implementation attempted to do this (queue and wait).
2312 * The current implementation does not do so. The POSIX standard
2313 * and SVID should be consulted to determine what behavior is mandated.
2314 */
2315void exit_sem(struct task_struct *tsk)
2316{
4daa28f6 2317 struct sem_undo_list *ulp;
1da177e4 2318
4daa28f6
MS
2319 ulp = tsk->sysvsem.undo_list;
2320 if (!ulp)
1da177e4 2321 return;
9edff4ab 2322 tsk->sysvsem.undo_list = NULL;
1da177e4 2323
f74370b8 2324 if (!refcount_dec_and_test(&ulp->refcnt))
1da177e4
LT
2325 return;
2326
380af1b3 2327 for (;;) {
1da177e4 2328 struct sem_array *sma;
380af1b3 2329 struct sem_undo *un;
6062a8dc 2330 int semid, i;
9ae949fa 2331 DEFINE_WAKE_Q(wake_q);
4daa28f6 2332
2a1613a5
NB
2333 cond_resched();
2334
380af1b3 2335 rcu_read_lock();
05725f7e
JP
2336 un = list_entry_rcu(ulp->list_proc.next,
2337 struct sem_undo, list_proc);
602b8593
HK
2338 if (&un->list_proc == &ulp->list_proc) {
2339 /*
2340 * We must wait for freeary() before freeing this ulp,
 2341 * in case we raced with the last sem_undo. There is a small
 2342 * window where we exit while freeary() has not yet finished
 2343 * unlocking the sem_undo_list.
2344 */
e0892e08
PM
2345 spin_lock(&ulp->lock);
2346 spin_unlock(&ulp->lock);
602b8593
HK
2347 rcu_read_unlock();
2348 break;
2349 }
2350 spin_lock(&ulp->lock);
2351 semid = un->semid;
2352 spin_unlock(&ulp->lock);
4daa28f6 2353
602b8593 2354 /* exit_sem raced with IPC_RMID, nothing to do */
6062a8dc
RR
2355 if (semid == -1) {
2356 rcu_read_unlock();
602b8593 2357 continue;
6062a8dc 2358 }
1da177e4 2359
602b8593 2360 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
380af1b3 2361 /* exit_sem raced with IPC_RMID, nothing to do */
6062a8dc
RR
2362 if (IS_ERR(sma)) {
2363 rcu_read_unlock();
380af1b3 2364 continue;
6062a8dc 2365 }
1da177e4 2366
6062a8dc 2367 sem_lock(sma, NULL, -1);
6e224f94 2368 /* exit_sem raced with IPC_RMID, nothing to do */
0f3d2b01 2369 if (!ipc_valid_object(&sma->sem_perm)) {
6e224f94
MS
2370 sem_unlock(sma, -1);
2371 rcu_read_unlock();
2372 continue;
2373 }
bf17bb71 2374 un = __lookup_undo(ulp, semid);
380af1b3
MS
2375 if (un == NULL) {
2376 /* exit_sem raced with IPC_RMID+semget() that created
2377 * exactly the same semid. Nothing to do.
2378 */
6062a8dc 2379 sem_unlock(sma, -1);
6d49dab8 2380 rcu_read_unlock();
380af1b3
MS
2381 continue;
2382 }
2383
2384 /* remove un from the linked lists */
cf9d5d78 2385 ipc_assert_locked_object(&sma->sem_perm);
4daa28f6
MS
2386 list_del(&un->list_id);
2387
edf28f40 2388 spin_lock(&ulp->lock);
380af1b3 2389 list_del_rcu(&un->list_proc);
edf28f40 2390 spin_unlock(&ulp->lock);
380af1b3 2391
4daa28f6
MS
2392 /* perform adjustments registered in un */
2393 for (i = 0; i < sma->sem_nsems; i++) {
1a233956 2394 struct sem *semaphore = &sma->sems[i];
4daa28f6
MS
2395 if (un->semadj[i]) {
2396 semaphore->semval += un->semadj[i];
1da177e4
LT
2397 /*
2398 * Range checks of the new semaphore value,
 2399 * not defined by SUS:
2400 * - Some unices ignore the undo entirely
2401 * (e.g. HP UX 11i 11.22, Tru64 V5.1)
2402 * - some cap the value (e.g. FreeBSD caps
2403 * at 0, but doesn't enforce SEMVMX)
2404 *
2405 * Linux caps the semaphore value, both at 0
2406 * and at SEMVMX.
2407 *
239521f3 2408 * Manfred <manfred@colorfullife.com>
1da177e4 2409 */
5f921ae9
IM
2410 if (semaphore->semval < 0)
2411 semaphore->semval = 0;
2412 if (semaphore->semval > SEMVMX)
2413 semaphore->semval = SEMVMX;
51d6f263 2414 ipc_update_pid(&semaphore->sempid, task_tgid(current));
1da177e4
LT
2415 }
2416 }
1da177e4 2417 /* maybe some queued-up processes were waiting for this */
9ae949fa 2418 do_smart_update(sma, NULL, 0, 1, &wake_q);
6062a8dc 2419 sem_unlock(sma, -1);
6d49dab8 2420 rcu_read_unlock();
9ae949fa 2421 wake_up_q(&wake_q);
380af1b3 2422
693a8b6e 2423 kfree_rcu(un, rcu);
1da177e4 2424 }
4daa28f6 2425 kfree(ulp);
1da177e4
LT
2426}
2427
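A small user-space demonstration of the adjustment pass above (a sketch): the child takes the semaphore with SEM_UNDO and exits without releasing it, yet the parent can still acquire it afterwards because exit_sem() rolls the child's semadj back in.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

union semun {				/* per semctl(2), the caller defines this */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	union semun arg = { .val = 1 };
	struct sembuf take = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
	struct sembuf retake = { .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT };

	if (semid < 0 || semctl(semid, 0, SETVAL, arg) < 0)
		return 1;

	if (fork() == 0) {
		semop(semid, &take, 1);		/* child holds the semaphore ... */
		_exit(0);			/* ... and exits without releasing it */
	}
	wait(NULL);

	/* Succeeds only because exit_sem() undid the child's decrement. */
	if (semop(semid, &retake, 1) == 0)
		printf("semaphore available again after child exit\n");

	semctl(semid, 0, IPC_RMID);
	return 0;
}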
2428#ifdef CONFIG_PROC_FS
19b4946c 2429static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1da177e4 2430{
1efdb69b 2431 struct user_namespace *user_ns = seq_user_ns(s);
ade9f91b
KC
2432 struct kern_ipc_perm *ipcp = it;
2433 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
e54d02b2 2434 time64_t sem_otime;
d12e1e50 2435
d8c63376
MS
2436 /*
 2437 * The proc interface isn't aware of sem_lock(); it calls
2438 * ipc_lock_object() directly (in sysvipc_find_ipc).
5864a2fd
MS
2439 * In order to stay compatible with sem_lock(), we must
2440 * enter / leave complex_mode.
d8c63376 2441 */
5864a2fd 2442 complexmode_enter(sma);
d8c63376 2443
d12e1e50 2444 sem_otime = get_semotime(sma);
19b4946c 2445
7f032d6e 2446 seq_printf(s,
e54d02b2 2447 "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
7f032d6e
JP
2448 sma->sem_perm.key,
2449 sma->sem_perm.id,
2450 sma->sem_perm.mode,
2451 sma->sem_nsems,
2452 from_kuid_munged(user_ns, sma->sem_perm.uid),
2453 from_kgid_munged(user_ns, sma->sem_perm.gid),
2454 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2455 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2456 sem_otime,
2457 sma->sem_ctime);
2458
5864a2fd
MS
2459 complexmode_tryleave(sma);
2460
7f032d6e 2461 return 0;
1da177e4
LT
2462}
2463#endif
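A reading aid for the seq_printf() format above: each /proc/sysvipc/sem line carries, in order, key, semid, perms (octal), nsems, uid, gid, cuid, cgid, otime and ctime; the uid/gid values are translated into the reader's user namespace by the from_kuid_munged()/from_kgid_munged() calls.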