		Semantics and Behavior of Atomic and
			 Bitmask Operations

			  David S. Miller

This document is intended to serve as a guide to Linux port
maintainers on how to implement atomic counter, bitops, and spinlock
interfaces properly.

The atomic_t type should be defined as a signed integer.  Also, it
should be made opaque such that any kind of cast to a normal C integer
type will fail.  Something like the following should suffice:

	typedef struct { volatile int counter; } atomic_t;

local_t is very similar to atomic_t.  If the counter is per CPU and only
updated by one CPU, local_t is probably more appropriate.  Please see
Documentation/local_ops.txt for the semantics of local_t.

The first operations to implement for atomic_t's are the initializers
and plain reads.

	#define ATOMIC_INIT(i)		{ (i) }
	#define atomic_set(v, i)	((v)->counter = (i))

The first macro is used in definitions, such as:

	static atomic_t my_counter = ATOMIC_INIT(1);

The second interface can be used at runtime, as in:

	struct foo { atomic_t counter; };
	...

	struct foo *k;

	k = kmalloc(sizeof(*k), GFP_KERNEL);
	if (!k)
		return -ENOMEM;
	atomic_set(&k->counter, 0);

Next, we have:

	#define atomic_read(v)	((v)->counter)

which simply reads the current value of the counter.

Now, we move onto the actual atomic operation interfaces.

	void atomic_add(int i, atomic_t *v);
	void atomic_sub(int i, atomic_t *v);
	void atomic_inc(atomic_t *v);
	void atomic_dec(atomic_t *v);

These four routines add and subtract integral values to/from the given
atomic_t value.  The first two routines pass explicit integers by
which to make the adjustment, whereas the latter two use an implicit
adjustment value of "1".

One very important aspect of these routines is that they DO NOT
require any explicit memory barriers.  They need only perform the
atomic_t counter update in an SMP safe manner.

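As a purely illustrative sketch (the rx_packets counter and note_rx()
helper are made-up names, not existing kernel interfaces), a statistics
counter that is only ever read approximately needs no ordering at all:

	static atomic_t rx_packets = ATOMIC_INIT(0);

	static void note_rx(int n)
	{
		/* SMP-safe counter update; no memory barrier is
		 * implied, and none is needed here.
		 */
		atomic_add(n, &rx_packets);
	}
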
Next, we have:

	int atomic_inc_return(atomic_t *v);
	int atomic_dec_return(atomic_t *v);

These routines add 1 and subtract 1, respectively, from the given
atomic_t and return the new counter value after the operation is
performed.

Unlike the above routines, it is required that explicit memory
barriers are performed before and after the operation.  It must be
done such that all memory operations before and after the atomic
operation calls are strongly ordered with respect to the atomic
operation itself.

For example, it should behave as if a smp_mb() call existed both
before and after the atomic operation.

If the atomic instructions used in an implementation provide explicit
memory barrier semantics which satisfy the above requirements, that is
fine as well.

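To make the requirement concrete, a port whose atomic instructions are
not themselves ordered could bracket the update with smp_mb().  This is
only a sketch; arch_add_return_relaxed() is a hypothetical helper
standing in for the port's unordered atomic primitive:

	int atomic_inc_return(atomic_t *v)
	{
		int ret;

		smp_mb();	/* order all prior memory operations */
		ret = arch_add_return_relaxed(1, v);
		smp_mb();	/* order all later memory operations */

		return ret;
	}
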
Let's move on:

	int atomic_add_return(int i, atomic_t *v);
	int atomic_sub_return(int i, atomic_t *v);

These behave just like atomic_{inc,dec}_return() except that an
explicit counter adjustment is given instead of the implicit "1".
This means that like atomic_{inc,dec}_return(), the memory barrier
semantics are required.

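As a usage illustration (next_id and new_id() are made-up names), the
returned value makes it easy to hand out unique identifiers:

	static atomic_t next_id = ATOMIC_INIT(0);

	int new_id(void)
	{
		/* Every caller sees a distinct, monotonically
		 * increasing value.
		 */
		return atomic_add_return(1, &next_id);
	}
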
Next:

	int atomic_inc_and_test(atomic_t *v);
	int atomic_dec_and_test(atomic_t *v);

These two routines increment and decrement by 1, respectively, the
given atomic counter.  They return a boolean indicating whether the
resulting counter value was zero or not.

They require explicit memory barrier semantics around the operation,
as above.

	int atomic_sub_and_test(int i, atomic_t *v);

This is identical to atomic_dec_and_test() except that an explicit
decrement is given instead of the implicit "1".  It requires explicit
memory barrier semantics around the operation.

	int atomic_add_negative(int i, atomic_t *v);

The given increment is added to the given atomic counter value.  A
boolean is returned which indicates whether the resulting counter
value is negative.  It requires explicit memory barrier semantics
around the operation.

Then:

	int atomic_cmpxchg(atomic_t *v, int old, int new);

This performs an atomic compare exchange operation on the atomic value
v, with the given old and new values.  Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
other accesses of *v are performed through atomic_xxx operations.

atomic_cmpxchg requires explicit memory barriers around the operation.

The semantics for atomic_cmpxchg are the same as those defined for
'cas' below.

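A typical use is a read-modify-write loop that only commits its update
when no other cpu has changed the counter in the meantime.  The
following sketch (inc_if_nonnegative() is an illustrative helper, not
an existing interface) increments the counter only while it is
non-negative:

	static int inc_if_nonnegative(atomic_t *v)
	{
		int old, new;

		for (;;) {
			old = atomic_read(v);
			if (old < 0)
				return 0;	/* give up, no update */
			new = old + 1;
			if (atomic_cmpxchg(v, old, new) == old)
				return 1;	/* our update won */
			/* another cpu changed *v, retry */
		}
	}
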
Finally:

	int atomic_add_unless(atomic_t *v, int a, int u);

If the atomic value v is not equal to u, this function adds a to v, and
returns non-zero.  If v is equal to u then it returns zero.  This is
done as an atomic operation.

atomic_add_unless requires explicit memory barriers around the
operation.

atomic_inc_not_zero() is also provided, and is equivalent to
atomic_add_unless(v, 1, 0).

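A common application, sketched here with illustrative names, is taking
a new reference only if the object has not already dropped its last
one:

	struct obj *obj_tryget(struct obj *obj)
	{
		/* If refcnt has already reached zero the object is
		 * being torn down, so refuse to hand out a reference.
		 */
		if (!atomic_inc_not_zero(&obj->refcnt))
			return NULL;
		return obj;
	}
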
If a caller requires memory barrier semantics around an atomic_t
operation which does not return a value, a set of interfaces are
defined which accomplish this:

	void smp_mb__before_atomic_dec(void);
	void smp_mb__after_atomic_dec(void);
	void smp_mb__before_atomic_inc(void);
	void smp_mb__after_atomic_inc(void);

For example, smp_mb__before_atomic_dec() can be used like so:

	obj->dead = 1;
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->ref_count);

It makes sure that all memory operations preceding the atomic_dec()
call are strongly ordered with respect to the atomic counter
operation.  In the above example, it guarantees that the assignment of
"1" to obj->dead will be globally visible to other cpus before the
atomic counter decrement.

Without the explicit smp_mb__before_atomic_dec() call, the
implementation could legally allow the atomic counter update to become
visible to other cpus before the "obj->dead = 1;" assignment.

The other three interfaces listed are used to provide explicit
ordering with respect to memory operations after an atomic_dec() call
(smp_mb__after_atomic_dec()) and around atomic_inc() calls
(smp_mb__{before,after}_atomic_inc()).

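The atomic_inc() variants are used in exactly the same way.  For
instance (obj->ready and obj->pending are illustrative fields, not
taken from real kernel code):

	obj->ready = 1;
	smp_mb__before_atomic_inc();
	atomic_inc(&obj->pending);
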
A missing memory barrier in the cases where they are required by the
atomic_t implementation above can have disastrous results.  Here is
an example, which follows a pattern occurring frequently in the Linux
kernel.  It is the use of atomic counters to implement reference
counting, and it works such that once the counter falls to zero it can
be guaranteed that no other entity can be accessing the object:

	static void obj_list_add(struct obj *obj)
	{
		obj->active = 1;
		list_add(&obj->list, &global_list);
	}

	static void obj_list_del(struct obj *obj)
	{
		list_del(&obj->list);
		obj->active = 0;
	}

	static void obj_destroy(struct obj *obj)
	{
		BUG_ON(obj->active);
		kfree(obj);
	}

	struct obj *obj_list_peek(struct list_head *head)
	{
		if (!list_empty(head)) {
			struct obj *obj;

			obj = list_entry(head->next, struct obj, list);
			atomic_inc(&obj->refcnt);
			return obj;
		}
		return NULL;
	}

	void obj_poke(void)
	{
		struct obj *obj;

		spin_lock(&global_list_lock);
		obj = obj_list_peek(&global_list);
		spin_unlock(&global_list_lock);

		if (obj) {
			obj->ops->poke(obj);
			if (atomic_dec_and_test(&obj->refcnt))
				obj_destroy(obj);
		}
	}

	void obj_timeout(struct obj *obj)
	{
		spin_lock(&global_list_lock);
		obj_list_del(obj);
		spin_unlock(&global_list_lock);

		if (atomic_dec_and_test(&obj->refcnt))
			obj_destroy(obj);
	}

(This is a simplification of the ARP queue management in the generic
neighbour discovery code of the networking.  Olaf Kirch found a bug
wrt. memory barriers in kfree_skb() that exposed the atomic_t memory
barrier requirements quite clearly.)

Given the above scheme, it must be the case that the obj->active
update done by the obj list deletion be visible to other processors
before the atomic counter decrement is performed.

Otherwise, the counter could fall to zero, yet obj->active would still
be set, thus triggering the assertion in obj_destroy().  The error
sequence looks like this:

	cpu 0				cpu 1
	obj_poke()			obj_timeout()
	obj = obj_list_peek();
	... gains ref to obj, refcnt=2
					obj_list_del(obj);
					obj->active = 0 ...
					... visibility delayed ...
	atomic_dec_and_test()
	... refcnt drops to 1 ...
					atomic_dec_and_test()
					... refcount drops to 0 ...
					obj_destroy()
					BUG() triggers since obj->active
					still seen as one
	obj->active update visibility occurs

With the memory barrier semantics required of the atomic_t operations
which return values, the above sequence of memory visibility can never
happen.  Specifically, in the above case the atomic_dec_and_test()
counter decrement would not become globally visible until the
obj->active update does.

As a historical note, 32-bit Sparc used to only allow usage of
24 bits of its atomic_t type.  This was because it used 8 bits
as a spinlock for SMP safety.  Sparc32 lacked a "compare and swap"
type instruction.  However, 32-bit Sparc has since been moved over
to a "hash table of spinlocks" scheme, that allows the full 32-bit
counter to be realized.  Essentially, an array of spinlocks is
indexed into based upon the address of the atomic_t being operated
on, and that lock protects the atomic operation.  Parisc uses the
same scheme.

Another note is that the atomic_t operations returning values are
extremely slow on an old 386.

We will now cover the atomic bitmask operations.  You will find that
their SMP and memory barrier semantics are similar in shape and scope
to the atomic_t ops above.

Native atomic bit operations are defined to operate on objects aligned
to the size of an "unsigned long" C data type, and are at least of
that size.  The endianness of the bits within each "unsigned long" is
the native endianness of the cpu.

	void set_bit(unsigned long nr, volatile unsigned long *addr);
	void clear_bit(unsigned long nr, volatile unsigned long *addr);
	void change_bit(unsigned long nr, volatile unsigned long *addr);

These routines set, clear, and change, respectively, the bit number
indicated by "nr" on the bit mask pointed to by "addr".

They must execute atomically, yet there are no implicit memory barrier
semantics required of these interfaces.

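For illustration, a hypothetical driver could keep per-device state in
a flags word like so (the flag name and dev structure are made up, and
dev->flags is assumed to be an unsigned long):

	#define DEV_FLAG_BUSY	0

	set_bit(DEV_FLAG_BUSY, &dev->flags);	/* atomic, no barrier */
	/* ... do the work ... */
	clear_bit(DEV_FLAG_BUSY, &dev->flags);
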
	int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
	int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
	int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);

Like the above, except that these routines return a boolean which
indicates whether the changed bit was set _BEFORE_ the atomic bit
operation.

WARNING! It is incredibly important that the value be a boolean,
ie. "0" or "1".  Do not try to be fancy and save a few instructions by
declaring the above to return "long" and just returning something like
"old_val & mask" because that will not work.

For one thing, this return value gets truncated to int in many code
paths using these interfaces, so on 64-bit if the bit is set in the
upper 32-bits then testers will never see that.

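To see why, consider this sketch of what would go wrong if an
implementation returned "old_val & mask" for a high bit (the names are
illustrative):

	/* Suppose bit 40 is already set and a broken
	 * test_and_set_bit() returns "old_val & mask" as a long,
	 * i.e. the value 1UL << 40.
	 */
	int was_set = test_and_set_bit(40, addr);

	/* The long is truncated to int: its low 32 bits are all
	 * zero, so was_set is 0 even though the bit WAS set.
	 */
	if (!was_set)
		/* wrongly concludes the bit was clear */;
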
One great example of where this problem crops up is the thread_info
flag operations.  Routines such as test_and_set_ti_thread_flag() chop
the return value into an int.  There are other places where things
like this occur as well.

These routines, like the atomic_t counter operations returning values,
require explicit memory barrier semantics around their execution.  All
memory operations before the atomic bit operation call must be made
visible globally before the atomic bit operation is made visible.
Likewise, the atomic bit operation must be visible globally before any
subsequent memory operation is made visible.  For example:

	obj->dead = 1;
	if (test_and_set_bit(0, &obj->flags))
		/* ... */;
	obj->killed = 1;

The implementation of test_and_set_bit() must guarantee that
"obj->dead = 1;" is visible to cpus before the atomic memory operation
done by test_and_set_bit() becomes visible.  Likewise, the atomic
memory operation done by test_and_set_bit() must become visible before
"obj->killed = 1;" is visible.

Finally there is the basic operation:

	int test_bit(unsigned long nr, __const__ volatile unsigned long *addr);

which returns a boolean indicating if bit "nr" is set in the bitmask
pointed to by "addr".

If explicit memory barriers are required around clear_bit() (which
does not return a value, and thus does not need to provide memory
barrier semantics), two interfaces are provided:

	void smp_mb__before_clear_bit(void);
	void smp_mb__after_clear_bit(void);

They are used as follows, and are akin to their atomic_t operation
brothers:

	/* All memory operations before this call will
	 * be globally visible before the clear_bit().
	 */
	smp_mb__before_clear_bit();
	clear_bit( ... );

	/* The clear_bit() will be visible before all
	 * subsequent memory operations.
	 */
	smp_mb__after_clear_bit();

Finally, there are non-atomic versions of the bitmask operations
provided.  They are used in contexts where some other higher-level SMP
locking scheme is being used to protect the bitmask, and thus less
expensive non-atomic operations may be used in the implementation.
They have names similar to the above bitmask operation interfaces,
except that two underscores are prefixed to the interface name.

	void __set_bit(unsigned long nr, volatile unsigned long *addr);
	void __clear_bit(unsigned long nr, volatile unsigned long *addr);
	void __change_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
	int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr);

These non-atomic variants also do not require any special memory
barrier semantics.

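As a sketch of when the non-atomic variants are appropriate (map_lock
and bitmap are illustrative names), a bitmap that is only ever touched
under a spinlock does not need the atomic forms:

	spin_lock(&map_lock);
	__set_bit(nr, bitmap);	/* cheap: the lock already serializes */
	spin_unlock(&map_lock);
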
The routines xchg() and cmpxchg() need the same exact memory barriers
as the atomic and bit operations returning values.

Spinlocks and rwlocks have memory barrier expectations as well.
The rule to follow is simple (see the sketch after the two rules):

1) When acquiring a lock, the implementation must make it globally
   visible before any subsequent memory operation.

2) When releasing a lock, the implementation must make it such that
   all previous memory operations are globally visible before the
   lock release.

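The following fragment (shared is an illustrative structure) shows
what the two rules buy a caller:

	spin_lock(&lock);	/* rule 1: the store below cannot become
				 * visible before the lock acquisition	*/
	shared->val = 1;
	spin_unlock(&lock);	/* rule 2: the store above is visible
				 * before the lock release		*/
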
Which finally brings us to _atomic_dec_and_lock().  There is an
architecture-neutral version implemented in lib/dec_and_lock.c,
but most platforms will wish to optimize this in assembler.

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);

Atomically decrement the given counter, and if it will drop to zero
atomically acquire the given spinlock and perform the decrement
of the counter to zero.  If it does not drop to zero, do nothing
with the spinlock.

It is actually pretty simple to get the memory barrier correct.
Simply satisfy the spinlock grab requirements, which is to make
sure the spinlock operation is globally visible before any
subsequent memory operation.

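A typical caller, sketched with illustrative names (obj,
obj_list_lock), reaches this routine via the atomic_dec_and_lock()
wrapper when the final reference drop must also unlink the object from
a lock-protected list:

	void obj_put(struct obj *obj)
	{
		if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
			/* We hold obj_list_lock and the count is zero,
			 * so nobody else can find or use the object.
			 */
			list_del(&obj->list);
			spin_unlock(&obj_list_lock);
			kfree(obj);
		}
	}
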
We can demonstrate this operation more clearly if we define
an abstract atomic operation:

	long cas(long *mem, long old, long new);

"cas" stands for "compare and swap".  It atomically:

1) Compares "old" with the value currently at "mem".
2) If they are equal, "new" is written to "mem".
3) Regardless, the current value at "mem" is returned.

As an example usage, here is what an atomic counter update
might look like:

	void example_atomic_inc(long *counter)
	{
		long old, new, ret;

		while (1) {
			old = *counter;
			new = old + 1;

			ret = cas(counter, old, new);
			if (ret == old)
				break;
		}
	}

Let's use cas() in order to build a pseudo-C atomic_dec_and_lock():

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		long old, new, ret;
		int went_to_zero;

		went_to_zero = 0;
		while (1) {
			old = atomic_read(atomic);
			new = old - 1;
			if (new == 0) {
				went_to_zero = 1;
				spin_lock(lock);
			}
			ret = cas(atomic, old, new);
			if (ret == old)
				break;
			if (went_to_zero) {
				spin_unlock(lock);
				went_to_zero = 0;
			}
		}

		return went_to_zero;
	}

Now, as far as memory barriers go, as long as spin_lock()
strictly orders all subsequent memory operations (including
the cas()) with respect to itself, things will be fine.

Said another way, _atomic_dec_and_lock() must guarantee that
a counter dropping to zero is never made visible before the
spinlock being acquired.

Note that this also means that for the case where the counter
is not dropping to zero, there are no memory ordering
requirements.