/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES 8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES (1+2*4)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and a single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single
 * depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES 2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS 4

/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct hlist_node hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head lock_entry;

        struct lockdep_subclass_key *key;
        unsigned int subclass;
        unsigned int dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long usage_mask;
        struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies,
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head locks_after, locks_before;

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        unsigned int version;

        /*
         * Statistics counter:
         */
        unsigned long ops;

        const char *name;
        int name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long contention_point[LOCKSTAT_POINTS];
        unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64 min;
        s64 max;
        s64 total;
        unsigned long nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long contention_point[LOCKSTAT_POINTS];
        unsigned long contending_point[LOCKSTAT_POINTS];
        struct lock_time read_waittime;
        struct lock_time write_waittime;
        struct lock_time read_holdtime;
        struct lock_time write_holdtime;
        unsigned long bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key *key;
        struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char *name;
#ifdef CONFIG_LOCK_STAT
        int cpu;
        unsigned long ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
        /*
         * Whether it's a crosslock.
         */
        int cross;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         * that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head entry;
        struct lock_class *class;
        struct stack_trace trace;
        int distance;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list *parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in lookup_chain_cache() */
        unsigned int irq_context :  2,
                     depth       :  6,
                     base        : 24;
        /* 4 byte hole */
        struct hlist_node entry;
        u64 chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS 13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64 prev_chain_key;
        unsigned long acquire_ip;
        struct lockdep_map *instance;
        struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64 waittime_stamp;
        u64 holdtime_stamp;
#endif
        unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;     /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12; /* 32 bits */
        unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
        /*
         * Generation id.
         *
         * A value of cross_gen_id will be stored when holding this,
         * which is globally increased whenever each crosslock is held.
         */
        unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at commit step.
 */
struct hist_lock {
        /*
         * Id for each entry in the ring buffer. This is used to
         * decide whether the ring buffer was overwritten or not.
         *
         * For example,
         *
         *           |<----------- hist_lock ring buffer size ------->|
         *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
         * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
         *
         *           where 'p' represents an acquisition in process
         *           context, 'i' represents an acquisition in irq
         *           context.
         *
         * In this example, the ring buffer was overwritten by
         * acquisitions in irq context, that should be detected on
         * rollback or commit.
         */
        unsigned int hist_id;

        /*
         * Separate stack_trace data. This will be used at commit step.
         */
        struct stack_trace trace;
        unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

        /*
         * Separate hlock instance. This will be used at commit step.
         *
         * TODO: Use a smaller data structure containing only necessary
         * data. However, we should make lockdep code able to handle the
         * smaller one first.
         */
        struct held_lock hlock;
};

/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
        /*
         * When more than one acquisition of crosslocks are overlapped,
         * we have to perform commit for them based on cross_gen_id of
         * the first acquisition, which allows us to add more true
         * dependencies.
         *
         * Moreover, when no acquisition of a crosslock is in progress,
         * we should not perform commit because the lock might not exist
         * any more, which might cause incorrect memory access. So we
         * have to track the number of acquisitions of a crosslock.
         */
        int nr_acquire;

        /*
         * Separate hlock instance. This will be used at commit step.
         *
         * TODO: Use a smaller data structure containing only necessary
         * data. However, we should make lockdep code able to handle the
         * smaller one first.
         */
        struct held_lock hlock;
};

struct lockdep_map_cross {
        struct lockdep_map map;
        struct cross_lock xlock;
};
#endif

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)
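
/*
 * Example (illustrative sketch, not part of this header): splitting a
 * lock off into its own class with a static key, so the validator does
 * not conflate it with other locks of the same type. The structure and
 * key names below are hypothetical:
 *
 *        static struct lock_class_key mydev_lock_key;
 *
 *        spin_lock_init(&dev->lock);
 *        lockdep_set_class(&dev->lock, &mydev_lock_key);
 */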

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
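
/*
 * Example (illustrative sketch): a locking primitive would typically
 * report an exclusive, fully-checked acquisition (read=0, check=1) and
 * the matching release roughly like this; 'my_lock', 'my_lock_t' and
 * 'my_arch_lock' are hypothetical names:
 *
 *        void my_lock(struct my_lock_t *l)
 *        {
 *                lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *                my_arch_lock(l);
 *        }
 *
 *        void my_unlock(struct my_lock_t *l)
 *        {
 *                lock_release(&l->dep_map, 0, _RET_IP_);
 *                my_arch_unlock(l);
 *        }
 */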

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                                     unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
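
/*
 * Example (illustrative sketch): lock_set_subclass() is useful when a
 * nested acquisition outlives the lock it nested under, in the style of
 * the scheduler's double-runqueue unlock, which re-annotates the
 * remaining rq->lock back to subclass 0 once the inner lock is dropped:
 *
 *        raw_spin_unlock(&busiest->lock);
 *        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */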

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP .lockdep_recursion = 0,

#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l) do { \
        WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)

#define lockdep_assert_held_exclusive(l) do { \
        WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
} while (0)

#define lockdep_assert_held_read(l) do { \
        WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
} while (0)

#define lockdep_assert_held_once(l) do { \
        WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
} while (0)
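
/*
 * Example (illustrative sketch): a helper that requires its caller to
 * hold a lock can document and check that requirement at runtime;
 * 'mydev' is a hypothetical structure with a lockdep-tracked spinlock:
 *
 *        static void mydev_update_state(struct mydev *dev)
 *        {
 *                lockdep_assert_held(&dev->lock);
 *                dev->state++;
 *        }
 */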

#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
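
/*
 * Example (illustrative sketch): pinning asserts that a held lock is
 * not dropped over a code region; the unpin must present the cookie
 * returned by the pin ('lock' is any object carrying a dep_map):
 *
 *        struct pin_cookie cookie = lockdep_pin_lock(lock);
 *        ...                  (lock must remain held throughout)
 *        lockdep_unpin_lock(lock, cookie);
 */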

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk) (0)

#define lockdep_is_held_type(l, r) (1)

#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)

#define lockdep_recursing(tsk) (0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
                                       const char *name,
                                       struct lock_class_key *key,
                                       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
 * What we essentially have to initialize is 'nr_acquire'. Other members
 * will be initialized in add_xlock().
 */
#define STATIC_CROSS_LOCK_INIT() \
        { .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
        { .map.name = (_name), .map.key = (void *)(_key), \
          .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
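
/*
 * Example (illustrative sketch): a static map for a pseudo-lock used
 * purely for annotation; 'my_map' and 'my_key' are hypothetical names:
 *
 *        static struct lock_class_key my_key;
 *        static struct lockdep_map my_map =
 *                STATIC_LOCKDEP_MAP_INIT("my_map", &my_key);
 */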

static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock) \
do { \
        if (!try(_lock)) { \
                lock_contended(&(_lock)->dep_map, _RET_IP_); \
                lock(_lock); \
        } \
        lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
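
/*
 * Example (illustrative sketch): lock implementations wrap their slow
 * path with LOCK_CONTENDED() so that contention is recorded only when
 * the trylock fast path fails, as the spinlock code does:
 *
 *        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 */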

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
({ \
        int ____err = 0; \
        if (!try(_lock)) { \
                lock_contended(&(_lock)->dep_map, _RET_IP_); \
                ____err = lock(_lock); \
        } \
        if (!____err) \
                lock_acquired(&(_lock)->dep_map, _RET_IP_); \
        ____err; \
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING 1
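
/*
 * Example (illustrative sketch): taking two locks of the same class in
 * a fixed parent->child order, annotating the inner acquisition as
 * nested ('parent' and 'child' are hypothetical objects):
 *
 *        mutex_lock(&parent->mutex);
 *        mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */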

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i) lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i) lock_release(l, n, i)

#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i) lock_release(l, n, i)

#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
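
/*
 * Example (illustrative sketch): lock_map_acquire()/lock_map_release()
 * annotate dependencies on objects that are not locks at all, roughly
 * in the style of the workqueue code treating "running a work item" as
 * a lock; the exact expression below is a simplification:
 *
 *        lock_map_acquire(&work->lockdep_map);
 *        f(work);
 *        lock_map_release(&work->lockdep_map);
 */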

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
        typecheck(struct lockdep_map *, &(lock)->dep_map); \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
        typecheck(struct lockdep_map *, &(lock)->dep_map); \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
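
/*
 * Example (illustrative sketch): might_lock_read() lets a path that only
 * sometimes takes a lock declare the dependency on every call, as
 * might_fault() does with mmap_sem:
 *
 *        might_lock_read(&current->mm->mmap_sem);
 */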

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */