]>
git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - kernel/locking/semaphore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 *
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/semaphore.h>
33 #include <linux/spinlock.h>
34 #include <linux/ftrace.h>
36 static noinline
void __down(struct semaphore
*sem
);
37 static noinline
int __down_interruptible(struct semaphore
*sem
);
38 static noinline
int __down_killable(struct semaphore
*sem
);
39 static noinline
int __down_timeout(struct semaphore
*sem
, long timeout
);
40 static noinline
void __up(struct semaphore
*sem
);
43 * down - acquire the semaphore
44 * @sem: the semaphore to be acquired
46 * Acquires the semaphore. If no more tasks are allowed to acquire the
47 * semaphore, calling this function will put the task to sleep until the
48 * semaphore is released.
50 * Use of this function is deprecated, please use down_interruptible() or
51 * down_killable() instead.
53 void down(struct semaphore
*sem
)
58 raw_spin_lock_irqsave(&sem
->lock
, flags
);
59 if (likely(sem
->count
> 0))
63 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
68 * down_interruptible - acquire the semaphore unless interrupted
69 * @sem: the semaphore to be acquired
71 * Attempts to acquire the semaphore. If no more tasks are allowed to
72 * acquire the semaphore, calling this function will put the task to sleep.
73 * If the sleep is interrupted by a signal, this function will return -EINTR.
74 * If the semaphore is successfully acquired, this function returns 0.
76 int down_interruptible(struct semaphore
*sem
)
82 raw_spin_lock_irqsave(&sem
->lock
, flags
);
83 if (likely(sem
->count
> 0))
86 result
= __down_interruptible(sem
);
87 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
91 EXPORT_SYMBOL(down_interruptible
);
94 * down_killable - acquire the semaphore unless killed
95 * @sem: the semaphore to be acquired
97 * Attempts to acquire the semaphore. If no more tasks are allowed to
98 * acquire the semaphore, calling this function will put the task to sleep.
99 * If the sleep is interrupted by a fatal signal, this function will return
100 * -EINTR. If the semaphore is successfully acquired, this function returns
103 int down_killable(struct semaphore
*sem
)
109 raw_spin_lock_irqsave(&sem
->lock
, flags
);
110 if (likely(sem
->count
> 0))
113 result
= __down_killable(sem
);
114 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
118 EXPORT_SYMBOL(down_killable
);
121 * down_trylock - try to acquire the semaphore, without waiting
122 * @sem: the semaphore to be acquired
124 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
125 * been acquired successfully or 1 if it cannot be acquired.
127 * NOTE: This return value is inverted from both spin_trylock and
128 * mutex_trylock! Be careful about this when converting code.
130 * Unlike mutex_trylock, this function can be used from interrupt context,
131 * and the semaphore can be released by any task or interrupt.
133 int down_trylock(struct semaphore
*sem
)
138 raw_spin_lock_irqsave(&sem
->lock
, flags
);
139 count
= sem
->count
- 1;
140 if (likely(count
>= 0))
142 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
146 EXPORT_SYMBOL(down_trylock
);
149 * down_timeout - acquire the semaphore within a specified time
150 * @sem: the semaphore to be acquired
151 * @timeout: how long to wait before failing
153 * Attempts to acquire the semaphore. If no more tasks are allowed to
154 * acquire the semaphore, calling this function will put the task to sleep.
155 * If the semaphore is not released within the specified number of jiffies,
156 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
158 int down_timeout(struct semaphore
*sem
, long timeout
)
164 raw_spin_lock_irqsave(&sem
->lock
, flags
);
165 if (likely(sem
->count
> 0))
168 result
= __down_timeout(sem
, timeout
);
169 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
173 EXPORT_SYMBOL(down_timeout
);
176 * up - release the semaphore
177 * @sem: the semaphore to release
179 * Release the semaphore. Unlike mutexes, up() may be called from any
180 * context and even by tasks which have never called down().
182 void up(struct semaphore
*sem
)
186 raw_spin_lock_irqsave(&sem
->lock
, flags
);
187 if (likely(list_empty(&sem
->wait_list
)))
191 raw_spin_unlock_irqrestore(&sem
->lock
, flags
);
195 /* Functions for the contended case */
197 struct semaphore_waiter
{
198 struct list_head list
;
199 struct task_struct
*task
;
204 * Because this function is inlined, the 'state' parameter will be
205 * constant, and thus optimised away by the compiler. Likewise the
206 * 'timeout' parameter for the cases without timeouts.
208 static inline int __sched
__down_common(struct semaphore
*sem
, long state
,
211 struct semaphore_waiter waiter
;
213 list_add_tail(&waiter
.list
, &sem
->wait_list
);
214 waiter
.task
= current
;
218 if (signal_pending_state(state
, current
))
220 if (unlikely(timeout
<= 0))
222 __set_current_state(state
);
223 raw_spin_unlock_irq(&sem
->lock
);
224 timeout
= schedule_timeout(timeout
);
225 raw_spin_lock_irq(&sem
->lock
);
231 list_del(&waiter
.list
);
235 list_del(&waiter
.list
);
239 static noinline
void __sched
__down(struct semaphore
*sem
)
241 __down_common(sem
, TASK_UNINTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
244 static noinline
int __sched
__down_interruptible(struct semaphore
*sem
)
246 return __down_common(sem
, TASK_INTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
249 static noinline
int __sched
__down_killable(struct semaphore
*sem
)
251 return __down_common(sem
, TASK_KILLABLE
, MAX_SCHEDULE_TIMEOUT
);
254 static noinline
int __sched
__down_timeout(struct semaphore
*sem
, long timeout
)
256 return __down_common(sem
, TASK_UNINTERRUPTIBLE
, timeout
);
259 static noinline
void __sched
__up(struct semaphore
*sem
)
261 struct semaphore_waiter
*waiter
= list_first_entry(&sem
->wait_list
,
262 struct semaphore_waiter
, list
);
263 list_del(&waiter
->list
);
265 wake_up_process(waiter
->task
);