/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
typedef struct {
        volatile unsigned long lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)

#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

/*
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness guarantees would have a cost.
 * (An illustrative usage sketch follows the primitives below.)
 */
static inline void _raw_spin_lock(spinlock_t *lock)
{
        /*
         * Spin with "tas.b": it atomically tests the lock byte and sets its
         * high bit, setting the T flag only if the byte was zero (i.e. the
         * lock was free). "bf/s" retries while T is clear, meaning somebody
         * else already held the lock; the "nop" fills the branch delay slot.
         */
        __asm__ __volatile__ (
                "1:\n\t"
                "tas.b @%0\n\t"
                "bf/s 1b\n\t"
                "nop\n\t"
                : "=r" (lock->lock)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        assert_spin_locked(lock);

        lock->lock = 0;
}

#define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))

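/*
 * Illustrative usage sketch (not part of the original header, and kept out
 * of the build with "#if 0"): how callers typically reach these primitives
 * through the generic spin_lock() wrappers from <linux/spinlock.h>. The
 * demo_* names are hypothetical, introduced only for this example.
 */
#if 0
static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

/* Data touched from process context only: the plain variant is enough. */
static void demo_add(unsigned long n)
{
        spin_lock(&demo_lock);
        demo_count += n;
        spin_unlock(&demo_lock);
}

/* Data also touched from an interrupt handler: use the IRQ-disabling
 * variant so the handler cannot interrupt us while we hold the lock. */
static void demo_add_irqsafe(unsigned long n)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_count += n;
        spin_unlock_irqrestore(&demo_lock, flags);
}
#endif
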
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. In that case we can "mix" IRQ-safe locks: any writer needs to
 * take an IRQ-safe write lock, but readers can take non-IRQ-safe read
 * locks. (An illustrative sketch of this pattern follows the read/write
 * primitives below.)
 */
typedef struct {
        spinlock_t lock;
        atomic_t counter;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_BIAS            0x01000000
#define RW_LOCK_UNLOCKED        (rwlock_t) { { 0 }, { RW_LOCK_BIAS } }
#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while (0)

static inline void _raw_read_lock(rwlock_t *rw)
{
        /* The reader count is only ever touched under the internal spinlock. */
        _raw_spin_lock(&rw->lock);

        atomic_inc(&rw->counter);

        _raw_spin_unlock(&rw->lock);
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
        _raw_spin_lock(&rw->lock);

        atomic_dec(&rw->counter);

        _raw_spin_unlock(&rw->lock);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
        /*
         * The internal spinlock is held for the whole write-side critical
         * section, so no new reader can get in; -1 in the counter marks
         * the writer.
         */
        _raw_spin_lock(&rw->lock);
        atomic_set(&rw->counter, -1);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
        atomic_set(&rw->counter, 0);
        _raw_spin_unlock(&rw->lock);
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

static inline int _raw_write_trylock(rwlock_t *rw)
{
        /*
         * Succeeds only if the counter still held its unlocked bias value;
         * otherwise put the bias back and report failure.
         */
        if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
                return 1;

        atomic_add(RW_LOCK_BIAS, &rw->counter);

        return 0;
}
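
/*
 * Illustrative usage sketch (not part of the original header, and kept out
 * of the build with "#if 0"): the "mixed" pattern from the NOTE above,
 * where readers in interrupt context take a plain read_lock() while the
 * writer disables interrupts around its write_lock(). The demo_* names and
 * the demo_entry structure are hypothetical, introduced only for this
 * example; the lock wrappers come from <linux/spinlock.h>.
 */
#if 0
struct demo_entry {
        struct demo_entry *next;
        int value;
};

static rwlock_t demo_rwlock = RW_LOCK_UNLOCKED;
static struct demo_entry *demo_list;

/* Interrupt-context reader: no interrupt path ever writes, so a plain,
 * non-IRQ read lock is sufficient here. */
static int demo_irq_reader(void)
{
        struct demo_entry *p;
        int sum = 0;

        read_lock(&demo_rwlock);
        for (p = demo_list; p; p = p->next)
                sum += p->value;
        read_unlock(&demo_rwlock);

        return sum;
}

/* Process-context writer: must disable interrupts, because a reader in an
 * interrupt handler could otherwise spin forever on the lock we hold. */
static void demo_writer(struct demo_entry *entry)
{
        write_lock_irq(&demo_rwlock);
        entry->next = demo_list;
        demo_list = entry;
        write_unlock_irq(&demo_rwlock);
}
#endif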

#endif /* __ASM_SH_SPINLOCK_H */