#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
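/*
 * Illustrative sketch, not part of the header itself: assuming the
 * usual UP-debug initializer __ARCH_SPIN_LOCK_UNLOCKED sets slock to 1,
 * the inverted convention makes a mistakenly zero-initialized lock read
 * as "locked", so the bug surfaces on first use:
 *
 *	arch_spinlock_t lock = { 0 };		// buggy: 0 means "locked"
 *	WARN_ON(arch_spin_is_locked(&lock));	// fires immediately
 */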
24 | ||
25 | #ifdef CONFIG_DEBUG_SPINLOCK | |
26 | #define arch_spin_is_locked(x) ((x)->slock == 0) | |
27 | ||
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;
	barrier();
}
41 | ||
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)		({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)		do { barrier(); (void)(lock); } while (0)
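/* The (void)(lock) casts evaluate the macro argument exactly once
 * while suppressing "unused" warnings; on UP there is no reader/writer
 * contention to arbitrate, so only the compiler barrier remains. */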
67 | ||
68 | #else /* DEBUG_SPINLOCK */ | |
69 | #define arch_spin_is_locked(lock) ((void)(lock), 0) | |
70 | /* for sched/core.c and kernel_lock.c: */ | |
71 | # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) | |
72 | # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) | |
73 | # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) | |
74 | # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
75 | #endif /* DEBUG_SPINLOCK */ | |
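/* In the non-debug UP case locking reduces to compiler barriers;
 * preemption and interrupt handling happen in the generic spin_lock()
 * API layer (see linux/spinlock_api_up.h). */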
76 | ||
77 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) | |
78 | ||
79 | #define arch_read_can_lock(lock) (((void)(lock), 1)) | |
80 | #define arch_write_can_lock(lock) (((void)(lock), 1)) | |
81 | ||
82 | #define arch_spin_unlock_wait(lock) \ | |
83 | do { cpu_relax(); } while (arch_spin_is_locked(lock)) | |
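/* Note: with the non-debug arch_spin_is_locked() above, the condition
 * is constant 0 and this reduces to a single cpu_relax(); only the
 * debug variant can actually observe a locked lock and keep spinning. */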
84 | ||
85 | #endif /* __LINUX_SPINLOCK_UP_H */ |