// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner		bit0
 * NULL			0	lock is free (fast acquire possible)
 * NULL			1	lock is free and has waiters and the top waiter
 *				is going to take the lock*
 * taskpointer		0	lock is held (fast release possible)
 * taskpointer		1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL during this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
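
/*
 * For illustration only, a minimal sketch of how both pieces of state are
 * decoded from the single owner word; the real helpers rt_mutex_owner()
 * and rt_mutex_has_waiters() live in rtmutex_common.h:
 *
 *	owner task:  (struct task_struct *)
 *		     ((unsigned long)READ_ONCE(lock->owner) & ~RT_MUTEX_HAS_WAITERS)
 *	has waiters: (unsigned long)READ_ONCE(lock->owner) & RT_MUTEX_HAS_WAITERS
 */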

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        if (rt_mutex_has_waiters(lock))
                return;

        /*
         * The rbtree has no waiters enqueued, now make sure that the
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen:
         *
         * CPU 0		CPU 1				CPU 2
         * l->owner=T1
         *			rt_mutex_lock(l)
         *			lock(l->lock)
         *			l->owner = T1 | HAS_WAITERS;
         *			enqueue(T2)
         *			boost()
         *			  unlock(l->lock)
         *			block()
         *
         *							rt_mutex_lock(l)
         *							lock(l->lock)
         *							l->owner = T1 | HAS_WAITERS;
         *							enqueue(T3)
         *							boost()
         *							  unlock(l->lock)
         *							block()
         *			signal(->T2)			signal(->T3)
         *			lock(l->lock)
         *			dequeue(T2)
         *			deboost()
         *			  unlock(l->lock)
         *							lock(l->lock)
         *							dequeue(T3)
         *							 ==> wait list is empty
         *							deboost()
         *							 unlock(l->lock)
         *			lock(l->lock)
         *			fixup_rt_mutex_waiters()
         *			  if (wait_list_empty(l)) {
         *			    owner = l->owner & ~HAS_WAITERS;
         *			    l->owner = owner
         *			      ==> l->owner = T1
         *			  }
         *							lock(l->lock)
         * rt_mutex_unlock(l)					fixup_rt_mutex_waiters()
         *							  if (wait_list_empty(l)) {
         *							    owner = l->owner & ~HAS_WAITERS;
         *							    cmpxchg(l->owner, T1, NULL)
         *							     ===> Success (l->owner = NULL)
         *
         *							    l->owner = owner
         *							      ==> l->owner = T1
         *							  }
         *
         * With the check for the waiter bit in place T3 on CPU2 will not
         * overwrite. All tasks fiddling with the waiters bit are
         * serialized by l->lock, so nothing else can modify the waiters
         * bit. If the bit is set then nothing can change l->owner either
         * so the simple RMW is safe. The cmpxchg() will simply fail if it
         * happens in the middle of the RMW because the waiters bit is
         * still set.
         */
        owner = READ_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS)
                WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg_relaxed(p, owner,
                                 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
                                        unsigned long flags)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *					lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *					mark_rt_mutex_waiters(lock);
         *					acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *					lock(wait_lock);
         *					mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *					enqueue_waiter();
         *					unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *					lock(wait_lock);
         *					acquire(lock);
         */
        return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
                                        unsigned long flags)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
}
#endif

/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
#define task_to_waiter(p)	\
        &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

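/*
 * task_to_waiter() builds a throwaway rt_mutex_waiter from a task via a
 * C99 compound literal, so the task's current prio/deadline can be fed
 * to the comparison helpers below without touching a real waiter, e.g.:
 *
 *	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task)))
 *		return;
 */
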
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return dl_time_before(left->deadline, right->deadline);

        return 0;
}

static inline int
rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
                      struct rt_mutex_waiter *right)
{
        if (left->prio != right->prio)
                return 0;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 0 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return left->deadline == right->deadline;

        return 1;
}

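/*
 * Worked example (illustrative): a SCHED_FIFO task with user priority 90
 * runs at kernel prio 99 - 90 = 9, one with user priority 80 at prio 19,
 * so the former sorts first on the prio check alone. SCHED_DEADLINE
 * waiters all carry prio -1 and therefore tie, so the absolute deadline
 * breaks the tie:
 *
 *	w1 = (struct rt_mutex_waiter){ .prio = -1, .deadline = 1000 };
 *	w2 = (struct rt_mutex_waiter){ .prio = -1, .deadline = 2000 };
 *	rt_mutex_waiter_less(&w1, &w2);	// 1: earlier deadline wins
 */
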
#define __node_2_waiter(node) \
        rb_entry((node), struct rt_mutex_waiter, tree_entry)

static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
        return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        rb_erase_cached(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

#define __node_2_pi_waiter(node) \
        rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)

static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
        return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static void rt_mutex_adjust_prio(struct task_struct *p)
{
        struct task_struct *pi_task = NULL;

        lockdep_assert_held(&p->pi_lock);

        if (task_has_pi_waiters(p))
                pi_task = task_top_pi_waiter(p)->task;

        rt_mutex_setprio(p, pi_task);
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                                          enum rtmutex_chainwalk chwalk)
{
        /*
         * This is just a wrapper function for the following call,
         * because debug_rt_mutex_detect_deadlock() smells like a magic
         * debug feature and I wanted to keep the cond function in the
         * main source file along with the comments instead of having
         * two of the same in the headers.
         */
        return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

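/*
 * The limit is exposed as a sysctl (kernel.max_lock_depth), so an
 * administrator can tune it at runtime, e.g.:
 *
 *	# sysctl kernel.max_lock_depth=2048
 */
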
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 * again:
 *	loop_sanity_check();
 * retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      enum rtmutex_chainwalk chwalk,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        struct rt_mutex_waiter *prerequeue_top_waiter;
        int ret = 0, depth = 0;
        struct rt_mutex *lock;
        bool detect_deadlock;
        bool requeue = true;

        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
again:
        /*
         * We limit the lock chain length for each invocation.
         */
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }

        /*
         * We are fully preemptible here and only hold the refcount on
         * @task. So everything can have changed under us since the
         * caller or our own code below (goto retry/again) dropped all
         * locks.
         */
retry:
        /*
         * [1] Task cannot go away as we did a get_task() before !
         */
        raw_spin_lock_irq(&task->pi_lock);

        /*
         * [2] Get the waiter on which @task is blocked on.
         */
        waiter = task->pi_blocked_on;

        /*
         * [3] check_exit_conditions_1() protected by task->pi_lock.
         */

        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task. If deadlock
                 * detection is enabled we continue, but stop the
                 * requeueing in the chain walk.
                 */
                if (top_waiter != task_top_pi_waiter(task)) {
                        if (!detect_deadlock)
                                goto out_unlock_pi;
                        else
                                requeue = false;
                }
        }

        /*
         * If the waiter priority is the same as the task priority
         * then there is no further priority adjustment necessary. If
         * deadlock detection is off, we stop the chain walk. If it's
         * enabled we continue, but stop the requeueing in the chain
         * walk.
         */
        if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                if (!detect_deadlock)
                        goto out_unlock_pi;
                else
                        requeue = false;
        }

        /*
         * [4] Get the next lock
         */
        lock = waiter->lock;
        /*
         * [5] We need to trylock here as we are holding task->pi_lock,
         * which is the reverse lock order versus the other rtmutex
         * operations.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }

        /*
         * [6] check_exit_conditions_2() protected by task->pi_lock and
         * lock->wait_lock.
         *
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        /*
         * If we just follow the lock chain for deadlock detection, no
         * need to do all the requeue operations. To avoid a truckload
         * of conditionals around the various places below, just do the
         * minimum chain walk checks.
         */
        if (!requeue) {
                /*
                 * No requeue[7] here. Just release @task [8]
                 */
                raw_spin_unlock(&task->pi_lock);
                put_task_struct(task);

                /*
                 * [9] check_exit_conditions_3 protected by lock->wait_lock.
                 * If there is no owner of the lock, end of chain.
                 */
                if (!rt_mutex_owner(lock)) {
                        raw_spin_unlock_irq(&lock->wait_lock);
                        return 0;
                }

                /* [10] Grab the next task, i.e. owner of @lock */
                task = get_task_struct(rt_mutex_owner(lock));
                raw_spin_lock(&task->pi_lock);

                /*
                 * No requeue [11] here. We just do deadlock detection.
                 *
                 * [12] Store whether owner is blocked
                 * itself. Decision is made after dropping the locks
                 */
                next_lock = task_blocked_on_lock(task);
                /*
                 * Get the top waiter for the next iteration
                 */
                top_waiter = rt_mutex_top_waiter(lock);

                /* [13] Drop locks */
                raw_spin_unlock(&task->pi_lock);
                raw_spin_unlock_irq(&lock->wait_lock);

                /* If owner is not blocked, end of chain. */
                if (!next_lock)
                        goto out_put_task;
                goto again;
        }

        /*
         * Store the current top waiter before doing the requeue
         * operation on @lock. We need it for the boost/deboost
         * decision below.
         */
        prerequeue_top_waiter = rt_mutex_top_waiter(lock);

        /* [7] Requeue the waiter in the lock waiter tree. */
        rt_mutex_dequeue(lock, waiter);

        /*
         * Update the waiter prio fields now that we're dequeued.
         *
         * These values can have changed through either:
         *
         *   sys_sched_set_scheduler() / sys_sched_setattr()
         *
         * or
         *
         *   DL CBS enforcement advancing the effective deadline.
         *
         * Even though pi_waiters also uses these fields, and that tree is only
         * updated in [11], we can do this here, since we hold [L], which
         * serializes all pi_waiters access and rb_erase() does not care about
         * the values of the node being removed.
         */
        waiter->prio = task->prio;
        waiter->deadline = task->dl.deadline;

        rt_mutex_enqueue(lock, waiter);

        /* [8] Release the task */
        raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);

        /*
         * [9] check_exit_conditions_3 protected by lock->wait_lock.
         *
         * We must abort the chain walk if there is no lock owner even
         * in the dead lock detection case, as we have nothing to
         * follow here. This is the end of the chain we are walking.
         */
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue [7] above changed the top waiter,
                 * then we need to wake the new top waiter up to try
                 * to get the lock.
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }

        /* [10] Grab the next task, i.e. the owner of @lock */
        task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
                /*
                 * The waiter became the new top (highest priority)
                 * waiter on the lock. Replace the previous top waiter
                 * in the owner task's pi waiters tree with this waiter
                 * and adjust the priority of the owner.
                 */
                rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(task);

        } else if (prerequeue_top_waiter == waiter) {
                /*
                 * The waiter was the top waiter on the lock, but is
                 * no longer the top priority waiter. Replace waiter in
                 * the owner task's pi waiters tree with the new top
                 * (highest priority) waiter and adjust the priority
                 * of the owner.
                 * The new top waiter is stored in @waiter so that
                 * @waiter == @top_waiter evaluates to true below and
                 * we continue to deboost the rest of the chain.
                 */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(task);
        } else {
                /*
                 * Nothing changed. No need to do any priority
                 * adjustment.
                 */
        }

        /*
         * [12] check_exit_conditions_4() protected by task->pi_lock
         * and lock->wait_lock. The actual decisions are made after we
         * dropped the locks.
         *
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);
        /*
         * Store the top waiter of @lock for the end of chain walk
         * decision below.
         */
        top_waiter = rt_mutex_top_waiter(lock);

        /* [13] Drop the locks */
        raw_spin_unlock(&task->pi_lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        /*
         * Make the actual exit decisions [12], based on the stored
         * values.
         *
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        /*
         * If the current waiter is not the top waiter on the lock,
         * then we can stop the chain walk here if we are not in full
         * deadlock detection mode.
         */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

out_unlock_pi:
        raw_spin_unlock_irq(&task->pi_lock);
out_put_task:
        put_task_struct(task);

        return ret;
}
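
/*
 * A concrete walk, for illustration: high-prio T3 blocks on L2 owned by T2,
 * while T2 is already blocked on L1 owned by low-prio T1. When T3 blocks,
 * task_blocks_on_rt_mutex() below boosts T2 directly and kicks off the
 * chain walk on T2: the walk requeues T2's waiter in L1's waiter tree with
 * the boosted priority and then boosts the owner T1. T1 is not blocked
 * itself, so next_lock is NULL and the walk terminates.
 */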

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&lock->wait_lock);

        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
         * other tasks which try to modify @lock into the slow path
         * and they serialize on @lock->wait_lock.
         *
         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
         * as explained at the top of this file if and only if:
         *
         * - There is a lock owner. The caller must fixup the
         *   transient state if it does a trylock or leaves the lock
         *   function due to a signal or timeout.
         *
         * - @task acquires the lock and there are no other
         *   waiters. This is undone in rt_mutex_set_owner(@task) at
         *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);

        /*
         * If @lock has an owner, give up.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * If @waiter != NULL, @task has already enqueued the waiter
         * into @lock's waiter tree. If @waiter == NULL then this is a
         * trylock attempt.
         */
        if (waiter) {
                /*
                 * If waiter is not the highest priority waiter of
                 * @lock, give up.
                 */
                if (waiter != rt_mutex_top_waiter(lock))
                        return 0;

                /*
                 * We can acquire the lock. Remove the waiter from the
                 * lock waiters tree.
                 */
                rt_mutex_dequeue(lock, waiter);

        } else {
                /*
                 * If the lock has waiters already we check whether @task is
                 * eligible to take over the lock.
                 *
                 * If there are no other waiters, @task can acquire
                 * the lock. @task->pi_blocked_on is NULL, so it does
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        /*
                         * If @task->prio is greater than or equal to
                         * the top waiter priority (kernel view),
                         * @task lost.
                         */
                        if (!rt_mutex_waiter_less(task_to_waiter(task),
                                                  rt_mutex_top_waiter(lock)))
                                return 0;

                        /*
                         * The current top waiter stays enqueued. We
                         * don't have to change anything in the lock
                         * waiters order.
                         */
                } else {
                        /*
                         * No waiters. Take the lock without the
                         * pi_lock dance. @task->pi_blocked_on is NULL
                         * and we have no waiters to enqueue in @task's
                         * pi waiters tree.
                         */
                        goto takeit;
                }
        }

        /*
         * Clear @task->pi_blocked_on. Requires protection by
         * @task->pi_lock. Redundant operation for the @waiter == NULL
         * case, but conditionals are more expensive than a redundant
         * store.
         */
        raw_spin_lock(&task->pi_lock);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
         * other waiters exist we have to insert the highest priority
         * waiter into @task->pi_waiters tree.
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
        raw_spin_unlock(&task->pi_lock);

takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        /*
         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
         * are still waiters or clears it.
         */
        rt_mutex_set_owner(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   enum rtmutex_chainwalk chwalk)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;

        lockdep_assert_held(&lock->wait_lock);

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other will detect the deadlock and return -EDEADLOCK,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
        if (owner == task)
                return -EDEADLK;

        raw_spin_lock(&task->pi_lock);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;
        waiter->deadline = task->dl.deadline;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock(&task->pi_lock);

        if (!owner)
                return 0;

        raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock_irq(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);

        raw_spin_lock_irq(&lock->wait_lock);

        return res;
}

/*
 * Remove the top waiter from the current task's pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
                                    struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;

        raw_spin_lock(&current->pi_lock);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters and deboost.
         *
         * We must in fact deboost here in order to ensure we call
         * rt_mutex_setprio() to update p->pi_top_task before the
         * task unblocks.
         */
        rt_mutex_dequeue_pi(current, waiter);
        rt_mutex_adjust_prio(current);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        /*
         * We deboosted before waking the top waiter task such that we don't
         * run two tasks with the 'same' priority (and ensure the
         * p->pi_top_task pointer points to a blocked task). This however can
         * lead to priority inversion if we would get preempted after the
         * deboost but before waking our donor task, hence the preempt_disable()
         * before unlock.
         *
         * Pairs with preempt_enable() in rt_mutex_postunlock();
         */
        preempt_disable();
        wake_q_add(wake_q, waiter->task);
        raw_spin_unlock(&current->pi_lock);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. It must
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock;

        lockdep_assert_held(&lock->wait_lock);

        raw_spin_lock(&current->pi_lock);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock(&current->pi_lock);

        /*
         * Only update priority if the waiter was the highest priority
         * waiter of the lock and there is an owner to update.
         */
        if (!owner || !is_top_waiter)
                return;

        raw_spin_lock(&owner->pi_lock);

        rt_mutex_dequeue_pi(owner, waiter);

        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

        rt_mutex_adjust_prio(owner);

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock(&owner->pi_lock);

        /*
         * Don't walk the chain, if the owner task is not blocked
         * itself.
         */
        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock_irq(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);

        raw_spin_lock_irq(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
                                   next_lock, NULL, task);
}

void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
        debug_rt_mutex_init_waiter(waiter);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
        RB_CLEAR_NODE(&waiter->tree_entry);
        waiter->task = NULL;
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (likely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock_irq(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule();

                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                     struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Yell loudly and stop the task right here.
         */
        rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  enum rtmutex_chainwalk chwalk)
{
        struct rt_mutex_waiter waiter;
        unsigned long flags;
        int ret = 0;

        rt_mutex_init_waiter(&waiter);

        /*
         * Technically we could use raw_spin_[un]lock_irq() here, but this can
         * be called in early boot if the cmpxchg() fast path is disabled
         * (debug, no architecture support). In this case we will acquire the
         * rtmutex with lock->wait_lock held. But we cannot unconditionally
         * enable interrupts in that early boot case. So we need to use the
         * irqsave/restore variants.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout))
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

        if (likely(!ret))
                /* sleep on the mutex */
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        if (unlikely(ret)) {
                __set_current_state(TASK_RUNNING);
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = try_to_take_rt_mutex(lock, current, NULL);

        /*
         * try_to_take_rt_mutex() sets the lock waiters bit
         * unconditionally. Clean this up.
         */
        fixup_rt_mutex_waiters(lock);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        unsigned long flags;
        int ret;

        /*
         * If the lock already has an owner we fail to get the lock.
         * This can be done without taking the @lock->wait_lock as
         * it is only being read, and this is a trylock anyway.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The mutex currently has no owner. Lock the wait lock and try to
         * acquire the lock. We use irqsave here to support early boot calls.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);

        ret = __rt_mutex_slowtrylock(lock);

        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        return ret;
}

/*
 * Slow path to release a rt-mutex.
 *
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
                                        struct wake_q_head *wake_q)
{
        unsigned long flags;

        /* irqsave required to support early boot calls */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);

        debug_rt_mutex_unlock(lock);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         * foo->lock->owner = NULL;
         *			rtmutex_lock(foo->lock);   <- fast path
         *			free = atomic_dec_and_test(foo->refcnt);
         *			rtmutex_unlock(foo->lock); <- fast path
         *			if (free)
         *				kfree(foo);
         * raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *	owner = rt_mutex_owner(lock);
         *	clear_rt_mutex_waiters(lock);
         *	raw_spin_unlock(&lock->wait_lock);
         *	if (cmpxchg(&lock->owner, owner, 0) == owner)
         *		return;
         *	goto retry;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *	lock->owner = NULL;
         *	raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock, flags) == true)
                        return false;
                /* Relock the rtmutex and try again */
                raw_spin_lock_irqsave(&lock->wait_lock, flags);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         *
         * Queue the next waiter for wakeup once we release the wait_lock.
         */
        mark_wakeup_next_waiter(wake_q, lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        return true; /* call rt_mutex_postunlock() */
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                enum rtmutex_chainwalk chwalk))
{
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;

        return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout,
                        enum rtmutex_chainwalk chwalk,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      enum rtmutex_chainwalk chwalk))
{
        if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
            likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;

        return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 1;

        return slowfn(lock);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
        wake_up_q(wake_q);

        /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
        preempt_enable();
}

23f78d4a IM |
1433 | static inline void |
1434 | rt_mutex_fastunlock(struct rt_mutex *lock, | |
802ab58d SAS |
1435 | bool (*slowfn)(struct rt_mutex *lock, |
1436 | struct wake_q_head *wqh)) | |
23f78d4a | 1437 | { |
194a6b5b | 1438 | DEFINE_WAKE_Q(wake_q); |
802ab58d | 1439 | |
fffa954f PZ |
1440 | if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) |
1441 | return; | |
802ab58d | 1442 | |
aa2bfe55 PZ |
1443 | if (slowfn(lock, &wake_q)) |
1444 | rt_mutex_postunlock(&wake_q); | |
23f78d4a IM |
1445 | } |
1446 | ||
62cedf3e PR |
1447 | static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) |
1448 | { | |
1449 | might_sleep(); | |
1450 | ||
1451 | mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | |
1452 | rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); | |
1453 | } | |
1454 | ||
1455 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
1456 | /** | |
1457 | * rt_mutex_lock_nested - lock a rt_mutex | |
1458 | * | |
1459 | * @lock: the rt_mutex to be locked | |
1460 | * @subclass: the lockdep subclass | |
1461 | */ | |
1462 | void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) | |
1463 | { | |
1464 | __rt_mutex_lock(lock, subclass); | |
1465 | } | |
1466 | EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); | |
62cedf3e | 1467 | |
84818af2 SRV |
1468 | #else /* !CONFIG_DEBUG_LOCK_ALLOC */ |
1469 | ||
23f78d4a IM |
1470 | /** |
1471 | * rt_mutex_lock - lock a rt_mutex | |
1472 | * | |
1473 | * @lock: the rt_mutex to be locked | |
1474 | */ | |
1475 | void __sched rt_mutex_lock(struct rt_mutex *lock) | |
1476 | { | |
62cedf3e | 1477 | __rt_mutex_lock(lock, 0); |
23f78d4a IM |
1478 | } |
1479 | EXPORT_SYMBOL_GPL(rt_mutex_lock); | |
62cedf3e | 1480 | #endif |
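/*
 * Editorial usage sketch, not part of the original source: the basic
 * lock/unlock pattern, assuming a lock defined with DEFINE_RT_MUTEX()
 * from <linux/rtmutex.h>:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section; the owner inherits the priority of the
 *	    highest priority waiter while holding the lock ...
 *	rt_mutex_unlock(&example_lock);
 */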
23f78d4a IM |
1481 | |
1482 | /** | |
1483 | * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
1484 | * | |
c051b21f | 1485 | * @lock: the rt_mutex to be locked |
23f78d4a IM |
1486 | * |
1487 | * Returns: | |
c051b21f TG |
1488 | * 0 on success |
1489 | * -EINTR when interrupted by a signal | |
23f78d4a | 1490 | */ |
c051b21f | 1491 | int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) |
23f78d4a | 1492 | { |
f5694788 PZ |
1493 | int ret; |
1494 | ||
23f78d4a IM |
1495 | might_sleep(); |
1496 | ||
f5694788 PZ |
1497 | mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
1498 | ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); | |
1499 | if (ret) | |
5facae4f | 1500 | mutex_release(&lock->dep_map, _RET_IP_); |
f5694788 PZ |
1501 | |
1502 | return ret; | |
23f78d4a IM |
1503 | } |
1504 | EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); | |
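/*
 * Editorial usage sketch, not part of the original source: callers of
 * the interruptible variant must handle -EINTR, typically by bailing
 * out so the pending signal can be delivered:
 *
 *	ret = rt_mutex_lock_interruptible(&example_lock);
 *	if (ret)
 *		return ret;	... -EINTR, a signal arrived ...
 *	... critical section ...
 *	rt_mutex_unlock(&example_lock);
 */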
1505 | ||
5293c2ef PZ |
1506 | /* |
1507 | * Futex variant, must not use fastpath. | |
1508 | */ | |
1509 | int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) | |
1510 | { | |
1511 | return rt_mutex_slowtrylock(lock); | |
c051b21f TG |
1512 | } |
1513 | ||
c1e2f0ea PZ |
1514 | int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) |
1515 | { | |
1516 | return __rt_mutex_slowtrylock(lock); | |
1517 | } | |
1518 | ||
23f78d4a | 1519 | /** |
23b94b96 LH |
1520 | * rt_mutex_timed_lock - lock a rt_mutex interruptibly; the timeout
1521 | * structure is provided by the caller
23f78d4a | 1523 | * |
c051b21f | 1524 | * @lock: the rt_mutex to be locked |
23f78d4a | 1525 | * @timeout: timeout structure or NULL (no timeout) |
23f78d4a IM |
1526 | * |
1527 | * Returns: | |
c051b21f TG |
1528 | * 0 on success |
1529 | * -EINTR when interrupted by a signal | |
3ac49a1c | 1530 | * -ETIMEDOUT when the timeout expired |
23f78d4a IM |
1531 | */ |
1532 | int | |
c051b21f | 1533 | rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) |
23f78d4a | 1534 | { |
f5694788 PZ |
1535 | int ret; |
1536 | ||
23f78d4a IM |
1537 | might_sleep(); |
1538 | ||
f5694788 PZ |
1539 | mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
1540 | ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, | |
8930ed80 | 1541 | RT_MUTEX_MIN_CHAINWALK, |
c051b21f | 1542 | rt_mutex_slowlock); |
f5694788 | 1543 | if (ret) |
5facae4f | 1544 | mutex_release(&lock->dep_map, _RET_IP_); |
f5694788 PZ |
1545 | |
1546 | return ret; | |
23f78d4a IM |
1547 | } |
1548 | EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); | |
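/*
 * Editorial usage sketch, not part of the original source, assuming the
 * hrtimer_sleeper on-stack helpers: the caller only initializes the
 * sleeper and sets the absolute expiry; the slow path arms the timer
 * itself when a timeout is passed in.
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC,
 *				      HRTIMER_MODE_ABS);
 *	hrtimer_set_expires(&to.timer, ktime_add_ms(ktime_get(), 50));
 *	ret = rt_mutex_timed_lock(&example_lock, &to);
 *	... ret is 0, -EINTR or -ETIMEDOUT ...
 *	destroy_hrtimer_on_stack(&to.timer);
 */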
1549 | ||
1550 | /** | |
1551 | * rt_mutex_trylock - try to lock a rt_mutex | |
1552 | * | |
1553 | * @lock: the rt_mutex to be locked | |
1554 | * | |
6ce47fd9 TG |
1555 | * This function can only be called in thread context. It's safe to |
1556 | * call it from atomic regions, but not from hard interrupt or soft | |
1557 | * interrupt context. | |
1558 | * | |
23f78d4a IM |
1559 | * Returns 1 on success and 0 on contention |
1560 | */ | |
1561 | int __sched rt_mutex_trylock(struct rt_mutex *lock) | |
1562 | { | |
f5694788 PZ |
1563 | int ret; |
1564 | ||
a461d587 | 1565 | if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) |
6ce47fd9 TG |
1566 | return 0; |
1567 | ||
f5694788 PZ |
1568 | ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); |
1569 | if (ret) | |
1570 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); | |
1571 | ||
1572 | return ret; | |
23f78d4a IM |
1573 | } |
1574 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); | |
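/*
 * Editorial usage sketch, not part of the original source: trylock
 * returns 1 on success and 0 on contention, so the result is used as a
 * boolean rather than as an -errno value:
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		... short, non-sleeping critical section ...
 *		rt_mutex_unlock(&example_lock);
 *	} else {
 *		... contended; fall back or retry later ...
 *	}
 */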
1575 | ||
1576 | /** | |
1577 | * rt_mutex_unlock - unlock a rt_mutex | |
1578 | * | |
1579 | * @lock: the rt_mutex to be unlocked | |
1580 | */ | |
1581 | void __sched rt_mutex_unlock(struct rt_mutex *lock) | |
1582 | { | |
5facae4f | 1583 | mutex_release(&lock->dep_map, _RET_IP_); |
23f78d4a IM |
1584 | rt_mutex_fastunlock(lock, rt_mutex_slowunlock); |
1585 | } | |
1586 | EXPORT_SYMBOL_GPL(rt_mutex_unlock); | |
1587 | ||
802ab58d | 1588 | /** |
bf594bf4 AS |
1589 | * __rt_mutex_futex_unlock - Futex variant; since futex variants do not
1590 | * use the fast-path, it can be simple and will not need to retry.
1591 | * | |
1592 | * @lock: The rt_mutex to be unlocked | |
1593 | * @wake_q: The wake queue head from which to get the next lock waiter | |
802ab58d | 1594 | */ |
5293c2ef PZ |
1595 | bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, |
1596 | struct wake_q_head *wake_q) | |
802ab58d | 1597 | { |
5293c2ef PZ |
1598 | lockdep_assert_held(&lock->wait_lock); |
1599 | ||
1600 | debug_rt_mutex_unlock(lock); | |
1601 | ||
1602 | if (!rt_mutex_has_waiters(lock)) { | |
1603 | lock->owner = NULL; | |
1604 | return false; /* done */ | |
1605 | } | |
1606 | ||
2a1c6029 | 1607 | /* |
def34eaa MG |
1608 | * We've already deboosted; mark_wakeup_next_waiter() will
1609 | * keep preemption disabled when we drop the wait_lock, to
1610 | * avoid inversion prior to the wakeup. The preempt_disable()
1611 | * therein pairs with rt_mutex_postunlock().
2a1c6029 | 1612 | */ |
def34eaa | 1613 | mark_wakeup_next_waiter(wake_q, lock); |
2a1c6029 | 1614 | |
aa2bfe55 | 1615 | return true; /* call postunlock() */ |
5293c2ef | 1616 | } |
fffa954f | 1617 | |
5293c2ef PZ |
1618 | void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) |
1619 | { | |
1620 | DEFINE_WAKE_Q(wake_q); | |
6b0ef92f | 1621 | unsigned long flags; |
aa2bfe55 | 1622 | bool postunlock; |
5293c2ef | 1623 | |
6b0ef92f | 1624 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
aa2bfe55 | 1625 | postunlock = __rt_mutex_futex_unlock(lock, &wake_q); |
6b0ef92f | 1626 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
5293c2ef | 1627 | |
aa2bfe55 PZ |
1628 | if (postunlock) |
1629 | rt_mutex_postunlock(&wake_q); | |
802ab58d SAS |
1630 | } |
1631 | ||
23b94b96 | 1632 | /** |
23f78d4a IM |
1633 | * rt_mutex_destroy - mark a mutex unusable |
1634 | * @lock: the mutex to be destroyed | |
1635 | * | |
1636 | * This function marks the mutex uninitialized, and any subsequent | |
1637 | * use of the mutex is forbidden. The mutex must not be locked when | |
1638 | * this function is called. | |
1639 | */ | |
1640 | void rt_mutex_destroy(struct rt_mutex *lock) | |
1641 | { | |
1642 | WARN_ON(rt_mutex_is_locked(lock)); | |
1643 | #ifdef CONFIG_DEBUG_RT_MUTEXES | |
1644 | lock->magic = NULL; | |
1645 | #endif | |
1646 | } | |
23f78d4a IM |
1647 | EXPORT_SYMBOL_GPL(rt_mutex_destroy); |
1648 | ||
1649 | /** | |
bf594bf4 | 1650 | * __rt_mutex_init - initialize the rt_mutex |
23f78d4a | 1651 | * |
bf594bf4 AS |
1652 | * @lock: The rt_mutex to be initialized |
1653 | * @name: The lock name used for debugging | |
1654 | * @key: The lock class key used for debugging | |
23f78d4a | 1655 | * |
bf594bf4 | 1656 | * Initialize the rt_mutex to unlocked state. |
23f78d4a | 1657 | * |
bf594bf4 | 1658 | * Initializing a locked rt_mutex is not allowed
23f78d4a | 1659 | */ |
f5694788 PZ |
1660 | void __rt_mutex_init(struct rt_mutex *lock, const char *name, |
1661 | struct lock_class_key *key) | |
23f78d4a IM |
1662 | { |
1663 | lock->owner = NULL; | |
d209d74d | 1664 | raw_spin_lock_init(&lock->wait_lock); |
a23ba907 | 1665 | lock->waiters = RB_ROOT_CACHED; |
23f78d4a | 1666 | |
cde50a67 LASL |
1667 | if (name && key) |
1668 | debug_rt_mutex_init(lock, name, key); | |
23f78d4a IM |
1669 | } |
1670 | EXPORT_SYMBOL_GPL(__rt_mutex_init); | |
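/*
 * Editorial usage sketch, not part of the original source: open coded
 * initialization with an explicit lock class. Most users go through the
 * rt_mutex_init() wrapper in <linux/rtmutex.h>, which supplies the name
 * and a static lock_class_key automatically.
 *
 *	static struct lock_class_key example_key;
 *	struct rt_mutex *lock = kmalloc(sizeof(*lock), GFP_KERNEL);
 *
 *	if (lock)
 *		__rt_mutex_init(lock, "example_lock", &example_key);
 */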
0cdbee99 IM |
1671 | |
1672 | /** | |
1673 | * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a | |
1674 | * proxy owner | |
1675 | * | |
84d82ec5 | 1676 | * @lock: the rt_mutex to be locked |
0cdbee99 IM |
1677 | * @proxy_owner: the task to set as owner
1678 | * | |
1679 | * No locking. Caller has to do the serializing itself
84d82ec5 TG |
1680 | * |
1681 | * Special API call for PI-futex support. This initializes the rtmutex and | |
1682 | * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not | |
1683 | * possible at this point because the pi_state which contains the rtmutex | |
1684 | * is not yet visible to other tasks. | |
0cdbee99 IM |
1685 | */ |
1686 | void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | |
1687 | struct task_struct *proxy_owner) | |
1688 | { | |
f5694788 | 1689 | __rt_mutex_init(lock, NULL, NULL); |
9a11b49a | 1690 | debug_rt_mutex_proxy_lock(lock, proxy_owner); |
8161239a | 1691 | rt_mutex_set_owner(lock, proxy_owner); |
0cdbee99 IM |
1692 | } |
1693 | ||
1694 | /** | |
1695 | * rt_mutex_proxy_unlock - release a lock on behalf of owner | |
1696 | * | |
84d82ec5 | 1697 | * @lock: the rt_mutex to be unlocked
0cdbee99 IM |
1698 | * |
1699 | * No locking. Caller has to do the serializing itself
84d82ec5 TG |
1700 | * |
1701 | * Special API call for PI-futex support. This merrily cleans up the rtmutex | |
1702 | * (debugging) state. Concurrent operations on this rt_mutex are not | |
1703 | * possible because it belongs to the pi_state which is about to be freed | |
1704 | * and it is no longer visible to other tasks.
0cdbee99 | 1705 | */ |
2156ac19 | 1706 | void rt_mutex_proxy_unlock(struct rt_mutex *lock) |
0cdbee99 IM |
1707 | { |
1708 | debug_rt_mutex_proxy_unlock(lock); | |
8161239a | 1709 | rt_mutex_set_owner(lock, NULL); |
0cdbee99 IM |
1710 | } |
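/*
 * Editorial sketch, not part of the original source: the intended
 * pairing of the two proxy calls, mirroring the pi_state life cycle in
 * the futex code (owner_task is a placeholder):
 *
 *	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, owner_task);
 *	... pi_state becomes visible; normal rtmutex operations ...
 *	rt_mutex_proxy_unlock(&pi_state->pi_mutex);
 */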
1711 | ||
1a1fb985 TG |
1712 | /** |
1713 | * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task | |
1714 | * @lock: the rt_mutex to take | |
1715 | * @waiter: the pre-initialized rt_mutex_waiter | |
1716 | * @task: the task to prepare | |
1717 | * | |
1718 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock | |
1719 | * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
1720 | * | |
1721 | * NOTE: does _NOT_ remove the @waiter on failure; must either call | |
1722 | * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. | |
1723 | * | |
1724 | * Returns: | |
1725 | * 0 - task blocked on lock | |
1726 | * 1 - acquired the lock for task, caller should wake it up | |
1727 | * <0 - error | |
1728 | * | |
1729 | * Special API call for PI-futex support. | |
1730 | */ | |
56222b21 | 1731 | int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
8dac456a | 1732 | struct rt_mutex_waiter *waiter, |
c051b21f | 1733 | struct task_struct *task) |
8dac456a DH |
1734 | { |
1735 | int ret; | |
1736 | ||
1a1fb985 TG |
1737 | lockdep_assert_held(&lock->wait_lock); |
1738 | ||
56222b21 | 1739 | if (try_to_take_rt_mutex(lock, task, NULL)) |
8dac456a | 1740 | return 1; |
8dac456a | 1741 | |
3d5c9340 | 1742 | /* We enforce deadlock detection for futexes */ |
8930ed80 TG |
1743 | ret = task_blocks_on_rt_mutex(lock, waiter, task, |
1744 | RT_MUTEX_FULL_CHAINWALK); | |
8dac456a | 1745 | |
8161239a | 1746 | if (ret && !rt_mutex_owner(lock)) { |
8dac456a DH |
1747 | /* |
1748 | * Reset the return value. We might have | |
1749 | * returned with -EDEADLK and the owner | |
1750 | * released the lock while we were walking the | |
1751 | * pi chain. Let the waiter sort it out. | |
1752 | */ | |
1753 | ret = 0; | |
1754 | } | |
8161239a | 1755 | |
8dac456a DH |
1756 | debug_rt_mutex_print_deadlock(waiter); |
1757 | ||
1758 | return ret; | |
1759 | } | |
1760 | ||
56222b21 PZ |
1761 | /** |
1762 | * rt_mutex_start_proxy_lock() - Start lock acquisition for another task | |
1763 | * @lock: the rt_mutex to take | |
1764 | * @waiter: the pre-initialized rt_mutex_waiter | |
1765 | * @task: the task to prepare | |
1766 | * | |
1a1fb985 TG |
1767 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock |
1768 | * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
1769 | * | |
1770 | * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter | |
1771 | * on failure. | |
1772 | * | |
56222b21 PZ |
1773 | * Returns: |
1774 | * 0 - task blocked on lock | |
1775 | * 1 - acquired the lock for task, caller should wake it up | |
1776 | * <0 - error | |
1777 | * | |
1a1fb985 | 1778 | * Special API call for PI-futex support. |
56222b21 PZ |
1779 | */ |
1780 | int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |
1781 | struct rt_mutex_waiter *waiter, | |
1782 | struct task_struct *task) | |
1783 | { | |
1784 | int ret; | |
1785 | ||
1786 | raw_spin_lock_irq(&lock->wait_lock); | |
1787 | ret = __rt_mutex_start_proxy_lock(lock, waiter, task); | |
1a1fb985 TG |
1788 | if (unlikely(ret)) |
1789 | remove_waiter(lock, waiter); | |
56222b21 PZ |
1790 | raw_spin_unlock_irq(&lock->wait_lock); |
1791 | ||
1792 | return ret; | |
1793 | } | |
1794 | ||
0cdbee99 IM |
1795 | /** |
1796 | * rt_mutex_next_owner - return the next owner of the lock | |
1797 | * | |
1798 | * @lock: the rt lock to query
1799 | * | |
1800 | * Returns the next owner of the lock or NULL | |
1801 | * | |
1802 | * Caller has to serialize against other accessors to the lock | |
1803 | * itself. | |
1804 | * | |
1805 | * Special API call for PI-futex support | |
1806 | */ | |
1807 | struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) | |
1808 | { | |
1809 | if (!rt_mutex_has_waiters(lock)) | |
1810 | return NULL; | |
1811 | ||
1812 | return rt_mutex_top_waiter(lock)->task; | |
1813 | } | |
8dac456a DH |
1814 | |
1815 | /** | |
38d589f2 | 1816 | * rt_mutex_wait_proxy_lock() - Wait for lock acquisition |
8dac456a DH |
1817 | * @lock: the rt_mutex we were woken on |
1818 | * @to: the timeout, NULL if none. The hrtimer should already have
c051b21f | 1819 | * been started. |
8dac456a | 1820 | * @waiter: the pre-initialized rt_mutex_waiter |
8dac456a | 1821 | * |
c034f48e | 1822 | * Wait for the lock acquisition started on our behalf by |
38d589f2 PZ |
1823 | * rt_mutex_start_proxy_lock(). Upon failure, the caller must call |
1824 | * rt_mutex_cleanup_proxy_lock(). | |
8dac456a DH |
1825 | * |
1826 | * Returns: | |
1827 | * 0 - success | |
c051b21f | 1828 | * <0 - error, one of -EINTR, -ETIMEDOUT |
8dac456a | 1829 | * |
38d589f2 | 1830 | * Special API call for PI-futex support |
8dac456a | 1831 | */ |
38d589f2 | 1832 | int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, |
8dac456a | 1833 | struct hrtimer_sleeper *to, |
c051b21f | 1834 | struct rt_mutex_waiter *waiter) |
8dac456a DH |
1835 | { |
1836 | int ret; | |
1837 | ||
b4abf910 | 1838 | raw_spin_lock_irq(&lock->wait_lock); |
afffc6c1 | 1839 | /* sleep on the mutex */ |
04dc1b2f | 1840 | set_current_state(TASK_INTERRUPTIBLE); |
8161239a | 1841 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
04dc1b2f PZ |
1842 | /* |
1843 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | |
1844 | * have to fix that up. | |
1845 | */ | |
1846 | fixup_rt_mutex_waiters(lock); | |
b4abf910 | 1847 | raw_spin_unlock_irq(&lock->wait_lock); |
8dac456a | 1848 | |
8dac456a DH |
1849 | return ret; |
1850 | } | |
38d589f2 PZ |
1851 | |
1852 | /** | |
1853 | * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition | |
1854 | * @lock: the rt_mutex we were woken on | |
1855 | * @waiter: the pre-initialized rt_mutex_waiter | |
1856 | * | |
1a1fb985 TG |
1857 | * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or |
1858 | * rt_mutex_wait_proxy_lock(). | |
38d589f2 PZ |
1859 | * |
1860 | * Unless we acquired the lock, we're still enqueued on the wait-list and can
1861 | * in fact still be granted ownership until we're removed. Therefore we can | |
1862 | * find we are in fact the owner and must disregard the | |
1863 | * rt_mutex_wait_proxy_lock() failure. | |
1864 | * | |
1865 | * Returns: | |
1866 | * true - did the cleanup, we are done.
1867 | * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
1868 | * the caller should disregard its return value.
1869 | * | |
1870 | * Special API call for PI-futex support | |
1871 | */ | |
1872 | bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, | |
1873 | struct rt_mutex_waiter *waiter) | |
1874 | { | |
1875 | bool cleanup = false; | |
1876 | ||
1877 | raw_spin_lock_irq(&lock->wait_lock); | |
04dc1b2f PZ |
1878 | /* |
1879 | * Do an unconditional try-lock, this deals with the lock stealing | |
1880 | * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() | |
1881 | * sets a NULL owner. | |
1882 | * | |
1883 | * We're not interested in the return value, because the subsequent | |
1884 | * test on rt_mutex_owner() will infer that. If the trylock succeeded, | |
1885 | * we will own the lock and it will have removed the waiter. If we | |
1886 | * failed the trylock, we're still not owner and we need to remove | |
1887 | * ourselves. | |
1888 | */ | |
1889 | try_to_take_rt_mutex(lock, current, waiter); | |
38d589f2 PZ |
1890 | /* |
1891 | * Unless we're the owner, we're still enqueued on the wait_list.
1892 | * So check if we became owner, if not, take us off the wait_list. | |
1893 | */ | |
1894 | if (rt_mutex_owner(lock) != current) { | |
1895 | remove_waiter(lock, waiter); | |
38d589f2 PZ |
1896 | cleanup = true; |
1897 | } | |
cfafcd11 PZ |
1898 | /* |
1899 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | |
1900 | * have to fix that up. | |
1901 | */ | |
1902 | fixup_rt_mutex_waiters(lock); | |
1903 | ||
38d589f2 PZ |
1904 | raw_spin_unlock_irq(&lock->wait_lock); |
1905 | ||
1906 | return cleanup; | |
1907 | } |
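/*
 * Editorial sketch, not part of the original source: how the proxy-lock
 * pieces compose, modeled on the futex PI code (lock, waiter, to and
 * task are placeholders). The requeueing side starts the acquisition;
 * the woken waiter either finishes it or cleans it up.
 *
 *	rt_mutex_init_waiter(&waiter);
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	ret = __rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *	... ret == 1: lock acquired for task, wake it up;
 *	    ret < 0: error, the waiter must still be cleaned up ...
 *
 *	... later, in the context of the blocked task ...
 *	ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *		ret = 0;	... became the owner after all ...
 */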