// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   avoidance).
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - all global variables are read-mostly.
 * - semop() calls and semctl(RMID) are synchronized by RCU.
 * - most operations do write operations (actually: spin_lock calls) to
 *   the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *   If multiple semaphores in one array are used, then cache line
 *   thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt().
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker; the woken up task does not have to do
 *   anything - not even acquire a lock or drop a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM).
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows to achieve FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
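/*
 * Editorial note: a minimal user space sketch of the behavior described
 * above (not part of the kernel sources; the key and the permission bits
 * are arbitrary):
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf up   = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	semop(id, &up, 1);		-- increments never block
 *	semop(id, &down, 1);		-- blocks while semval == 0, FIFO wakeup
 *	semctl(id, 0, IPC_RMID);	-- destroy; sleepers fail with EIDRM
 *
 * SEM_UNDO makes the kernel revert the net adjustment when the process
 * exits, clamped to 0..SEMVMX as noted above.
 */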
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>

#include <linux/uaccess.h>

#include "util.h"
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t	lock;		/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter;	/* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const;	/* pending single-sop operations */
					/* that do not alter the semaphore*/
	time64_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
	time64_t		sem_ctime;	/* create/last semctl() time */
	struct list_head	pending_alter;	/* pending operations */
						/* that alter the array */
	struct list_head	pending_const;	/* pending complex operations */
						/* that do not alter semvals */
	struct list_head	list_id;	/* undo requests on this array */
	int			sem_nsems;	/* no. of semaphores in array */
	int			complex_count;	/* pending complex operations */
	unsigned int		use_global_lock;/* >0: global lock required */

	struct sem		sems[];
} __randomize_layout;
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	struct pid		*pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};

#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10
/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
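/*
 * Editorial sketch of the pairing described above (condensed from
 * complexmode_tryleave() and the sem_lock() fast path further below,
 * not additional kernel code):
 *
 *	CPU 0 (complex op leaving)	CPU 1 (simple op arriving)
 *	--------------------------	--------------------------
 *	writes under the global lock	spin_lock(&sem->lock);
 *	smp_store_release(		if (!smp_load_acquire(
 *	    &sma->use_global_lock, 0);		&sma->use_global_lock))
 *					    fast path: all of CPU 0's
 *					    writes are visible here
 *
 * The RELEASE on CPU 0 pairs with the ACQUIRE on CPU 1, so a simple
 * operation that observes use_global_lock == 0 also observes every write
 * that was performed while the global lock was held.
 */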
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
int sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
int __init sem_init(void)
{
	const int err = sem_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
	return err;
}
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;

		curr = &sma->sems[q->sops[0].sem_num];
		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping.
		 * We must stay in complex mode.
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		sma->use_global_lock--;
	}
}
#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_mode.
	 */
	sem = &sma->sems[sops->sem_num];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_mode to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];

		spin_unlock(&sem->lock);
	}
}
/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;
	size_t size;

	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
	sma = kvmalloc(size, GFP_KERNEL);
	if (unlikely(!sma))
		return NULL;

	memset(sma, 0, size);

	return sma;
}
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
long ksys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = security_sem_associate,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks depends on the value indicated by the
 * semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2) 0 (wait-for-zero operation): blocks if semval is non-zero.
 *  (3) <0 blocks if decrementing semval would make it smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
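/*
 * Editorial illustration of the contract above (not kernel code): for a
 * two-operation request such as
 *
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = -1 },	-- blocks while semval[0] == 0
 *		{ .sem_num = 1, .sem_op = +1 },	-- never blocks
 *	};
 *
 * either both operations are applied (return 0), or neither is applied and
 * the caller must sleep (return 1); with IPC_NOWAIT set on the blocking
 * operation, -EAGAIN is returned instead.
 */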
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
						ktime_get_real_seconds();
	}
}
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
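/*
 * Editorial note: the usual caller pattern, visible for instance in
 * semctl_setval() below, is (sketch only):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	sem_lock(sma, NULL, -1);
 *	... modify semvals ...
 *	do_smart_update(sma, NULL, 0, 0, &wake_q);
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);	-- actual wake-ups, after all locks are dropped
 */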
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrements.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all task on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static time64_t get_semotime(struct sem_array *sma)
{
	int i;
	time64_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
static int semctl_stat(struct ipc_namespace *ns, int semid,
			 int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int id = 0;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
		id = sma->sem_perm.id;
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	ipc_unlock_object(&sma->sem_perm);
	rcu_read_unlock();
	return id;

out_unlock:
	rcu_read_unlock();
	return err;
}
static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_id;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_id = ipc_get_maxid(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_id < 0) ? 0 : max_id;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg);
}
#ifdef CONFIG_COMPAT

struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	compat_time_t sem_otime;
	compat_time_t sem_ctime;
	compat_uptr_t sem_base;
	compat_uptr_t sem_pending;
	compat_uptr_t sem_pending_last;
	unsigned short sem_nsems;
};

static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_semid64_ds __user *p = buf;

		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	} else {
		struct compat_semid_ds __user *p = buf;

		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
	}
}

static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_semid64_ds v;

		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime	 = lower_32_bits(in->sem_otime);
		v.sem_otime_high = upper_32_bits(in->sem_otime);
		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_semid_ds v;

		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime = in->sem_otime;
		v.sem_ctime = in->sem_ctime;
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int version = compat_ipc_parse_version(&cmd);
	int err;

	ns = current->nsproxy->ipc_ns;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg);
}
#endif
/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		refcount_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs an rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
static long do_semtimedop(int semid, struct sembuf __user *tsops,
		unsigned nsops, const struct timespec64 *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
			timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec64_to_jiffies(timeout);
	}

	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore, thus use
			 * the dupsop logic. "appears", because the detection
			 * can only check % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine grained locking scheme
	 * entangled here and why it's RMID race safe on comments at sem_lock()
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with the same id was created. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking successful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;

		curr = &sma->sems[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		queue.status = -EINTR;
		queue.sleeper = current;

		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov, is quite irrelevant to us at this
		 * point; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously. The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can detect
		 * scenarios where we were awakened externally, during the
		 * window between wake_q_add() and wake_up_q().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/*
			 * User space could assume that semop() is a memory
			 * barrier: Without the mb(), the cpu could
			 * speculatively read in userspace stale data that was
			 * overwritten by the previous owner of the semaphore.
			 */
			smp_mb();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another process.
		 * Leave without unlink_queue(), but with sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious */

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kvfree(sops);
	return error;
}
, struct sembuf __user
*tsops
,
2180 unsigned int nsops
, const struct __kernel_timespec __user
*timeout
)
2183 struct timespec64 ts
;
2184 if (get_timespec64(&ts
, timeout
))
2186 return do_semtimedop(semid
, tsops
, nsops
, &ts
);
2188 return do_semtimedop(semid
, tsops
, nsops
, NULL
);
2191 SYSCALL_DEFINE4(semtimedop
, int, semid
, struct sembuf __user
*, tsops
,
2192 unsigned int, nsops
, const struct __kernel_timespec __user
*, timeout
)
2194 return ksys_semtimedop(semid
, tsops
, nsops
, timeout
);
2197 #ifdef CONFIG_COMPAT_32BIT_TIME
2198 long compat_ksys_semtimedop(int semid
, struct sembuf __user
*tsems
,
2200 const struct compat_timespec __user
*timeout
)
2203 struct timespec64 ts
;
2204 if (compat_get_timespec64(&ts
, timeout
))
2206 return do_semtimedop(semid
, tsems
, nsops
, &ts
);
2208 return do_semtimedop(semid
, tsems
, nsops
, NULL
);
2211 COMPAT_SYSCALL_DEFINE4(semtimedop
, int, semid
, struct sembuf __user
*, tsems
,
2212 unsigned int, nsops
,
2213 const struct compat_timespec __user
*, timeout
)
2215 return compat_ksys_semtimedop(semid
, tsems
, nsops
, timeout
);
2219 SYSCALL_DEFINE3(semop
, int, semid
, struct sembuf __user
*, tsops
,
2222 return do_semtimedop(semid
, tsops
, nsops
, NULL
);
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		refcount_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else {
		tsk->sysvsem.undo_list = NULL;
	}

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
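/*
 * Editorial illustration of the semadj bookkeeping (not kernel code):
 * a process that performs
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semop(id, &op, 1);	-- semval: 1 -> 0, semadj[0]: 0 -> +1
 *
 * and then exits without a matching +1 operation has its semadj value
 * applied here: semval becomes 0 + 1 = 1 again, clamped to 0..SEMVMX as
 * done in the loop below.
 */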
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with last sem_undo. There is a small
			 * possibility where we exit while freeary() didn't
			 * finish unlocking sem_undo_list.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/* we are the last process using this ulp, acquiring ulp->lock
		 * isn't required. Besides that, we are also protected against
		 * IPC_RMID as we hold sma->sem_perm lock now
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];

			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
#endif