/* (git-blame export residue removed; attribution: commit b920de1b, David Howells) */
1 | /* MN10300 Atomic counter operations |
2 | * | |
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public Licence | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the Licence, or (at your option) any later version. | |
10 | */ | |
11 | #ifndef _ASM_ATOMIC_H | |
12 | #define _ASM_ATOMIC_H | |
13 | ||
14 | #ifdef CONFIG_SMP | |
15 | #error not SMP safe | |
16 | #endif | |
17 | ||
18 | /* | |
19 | * Atomic operations that C can't guarantee us. Useful for | |
20 | * resource counting etc.. | |
21 | */ | |
22 | ||
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct {
	int counter;	/* the counter value; guaranteed useful range is only 24 bits */
} atomic_t;

/* Static initialiser for an atomic_t, e.g. atomic_t n = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }
33 | ||
34 | #ifdef __KERNEL__ | |
35 | ||
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * NOTE(review): this is a plain, non-volatile load, so the compiler is
 * free to cache or reorder it; presumably acceptable only because this
 * port is UP-only (see the CONFIG_SMP #error above) -- confirm.
 */
#define atomic_read(v)	((v)->counter)
44 | ||
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * A plain store: a single aligned int write, relied upon to be
 * indivisible on this uniprocessor-only port.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
54 | ||
55 | #include <asm/system.h> | |
56 | ||
57 | /** | |
58 | * atomic_add_return - add integer to atomic variable | |
59 | * @i: integer value to add | |
60 | * @v: pointer of type atomic_t | |
61 | * | |
62 | * Atomically adds @i to @v and returns the result | |
63 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | |
64 | */ | |
65 | static inline int atomic_add_return(int i, atomic_t *v) | |
66 | { | |
67 | unsigned long flags; | |
68 | int temp; | |
69 | ||
70 | local_irq_save(flags); | |
71 | temp = v->counter; | |
72 | temp += i; | |
73 | v->counter = temp; | |
74 | local_irq_restore(flags); | |
75 | ||
76 | return temp; | |
77 | } | |
78 | ||
79 | /** | |
80 | * atomic_sub_return - subtract integer from atomic variable | |
81 | * @i: integer value to subtract | |
82 | * @v: pointer of type atomic_t | |
83 | * | |
84 | * Atomically subtracts @i from @v and returns the result | |
85 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | |
86 | */ | |
87 | static inline int atomic_sub_return(int i, atomic_t *v) | |
88 | { | |
89 | unsigned long flags; | |
90 | int temp; | |
91 | ||
92 | local_irq_save(flags); | |
93 | temp = v->counter; | |
94 | temp -= i; | |
95 | v->counter = temp; | |
96 | local_irq_restore(flags); | |
97 | ||
98 | return temp; | |
99 | } | |
100 | ||
101 | static inline int atomic_add_negative(int i, atomic_t *v) | |
102 | { | |
103 | return atomic_add_return(i, v) < 0; | |
104 | } | |
105 | ||
106 | static inline void atomic_add(int i, atomic_t *v) | |
107 | { | |
108 | atomic_add_return(i, v); | |
109 | } | |
110 | ||
111 | static inline void atomic_sub(int i, atomic_t *v) | |
112 | { | |
113 | atomic_sub_return(i, v); | |
114 | } | |
115 | ||
116 | static inline void atomic_inc(atomic_t *v) | |
117 | { | |
118 | atomic_add_return(1, v); | |
119 | } | |
120 | ||
121 | static inline void atomic_dec(atomic_t *v) | |
122 | { | |
123 | atomic_sub_return(1, v); | |
124 | } | |
125 | ||
/* Post-operation value helpers: return the counter value after the op */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/* Test helpers: non-zero when the operation leaves the counter at zero */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
132 | ||
/**
 * atomic_add_unless - add to counter unless it holds a given value
 * @v: pointer of type atomic_t
 * @a: amount to add
 * @u: value at which to refuse the addition
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the addition was performed, zero otherwise.
 *
 * Fix: capture @a and @u exactly once.  The original expansion
 * re-evaluated @a on every retry of the cmpxchg loop and @u both in the
 * loop condition and in the final test, so argument expressions with
 * side effects were evaluated multiple times.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int __a = (a);						\
	int __u = (u);						\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != __u &&					\
	       (old = atomic_cmpxchg((v), c, c + __a)) != c)	\
		c = old;					\
	c != __u;						\
})

/* atomic_inc_not_zero - increment @v unless it is zero; non-zero on success */
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
143 | ||
/**
 * atomic_clear_mask - clear bits in a word, atomically w.r.t. interrupts
 * @mask: the bits to clear
 * @addr: the word to modify
 *
 * Clears every bit of *@addr that is set in @mask.  Interrupt disabling
 * provides the atomicity on this uniprocessor-only port.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	local_irq_save(flags);
	*addr &= ~mask;
	local_irq_restore(flags);
}
153 | ||
/* Exchange / compare-and-exchange on the counter via the generic xchg ops */
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
156 | ||
/*
 * The atomic ops above already serialise against interrupts on this
 * UP-only port, so the smp_mb__* hooks need only a compiler barrier.
 * NOTE(review): the original comment ended in "???", i.e. this was
 * never verified for an SMP MN10300 -- revisit before enabling SMP.
 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
162 | ||
163 | #include <asm-generic/atomic.h> | |
164 | ||
165 | #endif /* __KERNEL__ */ | |
166 | #endif /* _ASM_ATOMIC_H */ |