/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

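/*
 * Usage sketch (illustrative only, not part of this file): a mutex is either
 * defined statically or initialized at runtime before first use; it must
 * never be memset() to zero. The names 'my_dev' and 'my_dev_write' below are
 * hypothetical.
 *
 *	static DEFINE_MUTEX(global_lock);	(static definition)
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		(runtime initialization)
 *	}
 *
 *	static void my_dev_write(struct my_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);
 *		... critical section, may sleep ...
 *		mutex_unlock(&dev->lock);
 *	}
 */
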
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

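/*
 * Illustrative example of the owner-word packing (assuming a 64-bit kernel
 * and a hypothetical task_struct at 0xffff888012345640): with sleeping
 * waiters present the owner word reads
 *
 *	0xffff888012345640 | MUTEX_FLAG_WAITERS == 0xffff888012345641
 *
 * so __owner_task() recovers the task pointer (0xffff888012345640) and
 * __owner_flags() recovers the flag bits (0x1).
 */
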
/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff, only set that in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}

			return false;
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		if (handoff)
			flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}

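/*
 * Worked example of the cmpxchg loop above (illustrative only): with the
 * mutex unlocked but MUTEX_FLAG_WAITERS set, the owner word reads 0x1.
 * __owner_task(0x1) is NULL, so we attempt
 *
 *	cmpxchg(&lock->owner, 0x1, (unsigned long)current | 0x1)
 *
 * which preserves the WAITERS bit while installing ourselves as owner. If
 * another CPU changed the word in the meantime the cmpxchg fails and we
 * retry with the freshly observed value.
 */
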
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

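/*
 * Sketch of the handoff protocol built from the helpers above, together with
 * __mutex_unlock_slowpath() and __mutex_lock_common() further down
 * (illustrative summary only; those functions are authoritative):
 *
 *	waiter:	wakes up without getting the lock, finds itself first in the
 *		wait list, sets MUTEX_FLAG_HANDOFF and waits again;
 *	owner:	mutex_unlock() sees HANDOFF set and, instead of clearing the
 *		owner field, calls __mutex_handoff(lock, top_waiter), which
 *		installs the top waiter as owner with RELEASE semantics;
 *	waiter:	its next __mutex_trylock(lock, true) observes owner == current,
 *		issues smp_mb() as the matching ACQUIRE, and owns the lock.
 */
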
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To handle lock holder preemption, skip spinning if the owner task
	 * is not running on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	struct task_struct *task = current;

	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner) {
			if (waiter && owner == task) {
				smp_mb(); /* ACQUIRE */
				break;
			}

			if (!mutex_spin_on_owner(lock, owner))
				goto fail_unlock;
		}

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, waiter))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

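/*
 * Usage sketch for the wound/wait API (illustrative and simplified; see
 * Documentation/locking/ww-mutex-design.txt for the full contract). The
 * names 'my_class', 'a' and 'b' are hypothetical, and error handling other
 * than -EDEADLK is elided:
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(a, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(b, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(a);		(drop everything already held)
 *		ww_mutex_lock_slow(b, &ctx);	(then sleep on the contended lock)
 *		... reacquire the remaining locks, possibly looping ...
 *	}
 *	... use the protected objects ...
 *	ww_mutex_unlock(a);
 *	ww_mutex_unlock(b);
 *	ww_acquire_fini(&ctx);
 */
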
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

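/*
 * Worked example of the stamp check above (illustrative): stamps come from an
 * increasing counter, so a larger stamp means a younger transaction. If the
 * holder's context has stamp 5 and ours has stamp 8, then 8 - 5 = 3 <= LONG_MAX
 * and the stamps differ, so we are the younger context and must back off with
 * -EDEADLK. Had our stamp been 3, the unsigned subtraction 3 - 5 would wrap
 * far above LONG_MAX and we would keep waiting, since the older context wins.
 * Comparing contexts by address only breaks ties between equal stamps.
 */
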
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
		     __mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	DEFINE_WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

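/*
 * Usage sketch (illustrative, hypothetical caller): note the spin_trylock()
 * style return value documented above, 1 on success and 0 on contention.
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		... got the lock without sleeping ...
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		... somebody else holds it; fall back or retry later ...
 *	}
 */
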
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
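/*
 * Usage sketch (illustrative, hypothetical object teardown): the helper above
 * implements the common "drop a reference, and only take the lock when the
 * count might hit zero" pattern. 'my_obj' and 'obj_list_lock' are made up.
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock))
 *			return;		(somebody still holds a reference)
 *
 *		(refcount reached zero and obj_list_lock is held)
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */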