/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009 by Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
26 | ||
27 | #include <atomic.h> | |
28 | #include <assert.h> | |
29 | #include <pthread.h> | |
30 | ||
/*
 * All operations are implemented by serializing them through a global
 * pthread mutex.  This provides a correct generic implementation.
 * However all supported architectures are encouraged to provide a
 * native implementation in assembly for performance reasons.
 */
pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
38 | ||
/*
 * These are the void-returning variants
 */
42 | ||
43 | #define ATOMIC_INC(name, type) \ | |
44 | void atomic_inc_##name(volatile type *target) \ | |
45 | { \ | |
46 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
47 | (*target)++; \ | |
48 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
49 | } | |
50 | ||
51 | ATOMIC_INC(long, unsigned long) | |
52 | ATOMIC_INC(8, uint8_t) | |
53 | ATOMIC_INC(uchar, uchar_t) | |
54 | ATOMIC_INC(16, uint16_t) | |
55 | ATOMIC_INC(ushort, ushort_t) | |
56 | ATOMIC_INC(32, uint32_t) | |
57 | ATOMIC_INC(uint, uint_t) | |
58 | ATOMIC_INC(ulong, ulong_t) | |
59 | ATOMIC_INC(64, uint64_t) | |
60 | ||
61 | ||
62 | #define ATOMIC_DEC(name, type) \ | |
63 | void atomic_dec_##name(volatile type *target) \ | |
64 | { \ | |
65 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
66 | (*target)--; \ | |
67 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
68 | } | |
69 | ||
70 | ATOMIC_DEC(long, unsigned long) | |
71 | ATOMIC_DEC(8, uint8_t) | |
72 | ATOMIC_DEC(uchar, uchar_t) | |
73 | ATOMIC_DEC(16, uint16_t) | |
74 | ATOMIC_DEC(ushort, ushort_t) | |
75 | ATOMIC_DEC(32, uint32_t) | |
76 | ATOMIC_DEC(uint, uint_t) | |
77 | ATOMIC_DEC(ulong, ulong_t) | |
78 | ATOMIC_DEC(64, uint64_t) | |
79 | ||
80 | ||
81 | #define ATOMIC_ADD(name, type1, type2) \ | |
82 | void atomic_add_##name(volatile type1 *target, type2 bits) \ | |
83 | { \ | |
84 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
85 | *target += bits; \ | |
86 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
87 | } | |
88 | ||
89 | ATOMIC_ADD(8, uint8_t, int8_t) | |
90 | ATOMIC_ADD(char, uchar_t, signed char) | |
91 | ATOMIC_ADD(16, uint16_t, int16_t) | |
92 | ATOMIC_ADD(short, ushort_t, short) | |
93 | ATOMIC_ADD(32, uint32_t, int32_t) | |
94 | ATOMIC_ADD(int, uint_t, int) | |
95 | ATOMIC_ADD(long, ulong_t, long) | |
96 | ATOMIC_ADD(64, uint64_t, int64_t) | |
97 | ||
98 | void | |
99 | atomic_add_ptr(volatile void *target, ssize_t bits) | |
100 | { | |
101 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
102 | *(caddr_t *)target += bits; | |
103 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
104 | } | |
105 | ||
106 | ||
107 | #define ATOMIC_SUB(name, type1, type2) \ | |
108 | void atomic_sub_##name(volatile type1 *target, type2 bits) \ | |
109 | { \ | |
110 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
111 | *target -= bits; \ | |
112 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
113 | } | |
114 | ||
115 | ATOMIC_SUB(8, uint8_t, int8_t) | |
116 | ATOMIC_SUB(char, uchar_t, signed char) | |
117 | ATOMIC_SUB(16, uint16_t, int16_t) | |
118 | ATOMIC_SUB(short, ushort_t, short) | |
119 | ATOMIC_SUB(32, uint32_t, int32_t) | |
120 | ATOMIC_SUB(int, uint_t, int) | |
121 | ATOMIC_SUB(long, ulong_t, long) | |
122 | ATOMIC_SUB(64, uint64_t, int64_t) | |
123 | ||
124 | void | |
125 | atomic_sub_ptr(volatile void *target, ssize_t bits) | |
126 | { | |
127 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
128 | *(caddr_t *)target -= bits; | |
129 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
130 | } | |
131 | ||
132 | ||
133 | #define ATOMIC_OR(name, type) \ | |
134 | void atomic_or_##name(volatile type *target, type bits) \ | |
135 | { \ | |
136 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
137 | *target |= bits; \ | |
138 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
139 | } | |
140 | ||
141 | ATOMIC_OR(8, uint8_t) | |
142 | ATOMIC_OR(uchar, uchar_t) | |
143 | ATOMIC_OR(16, uint16_t) | |
144 | ATOMIC_OR(ushort, ushort_t) | |
145 | ATOMIC_OR(32, uint32_t) | |
146 | ATOMIC_OR(uint, uint_t) | |
147 | ATOMIC_OR(ulong, ulong_t) | |
148 | ATOMIC_OR(64, uint64_t) | |
149 | ||
150 | ||
151 | #define ATOMIC_AND(name, type) \ | |
152 | void atomic_and_##name(volatile type *target, type bits) \ | |
153 | { \ | |
154 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
155 | *target &= bits; \ | |
156 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
157 | } | |
158 | ||
159 | ATOMIC_AND(8, uint8_t) | |
160 | ATOMIC_AND(uchar, uchar_t) | |
161 | ATOMIC_AND(16, uint16_t) | |
162 | ATOMIC_AND(ushort, ushort_t) | |
163 | ATOMIC_AND(32, uint32_t) | |
164 | ATOMIC_AND(uint, uint_t) | |
165 | ATOMIC_AND(ulong, ulong_t) | |
166 | ATOMIC_AND(64, uint64_t) | |
167 | ||
168 | ||
169 | /* | |
170 | * New value returning variants | |
171 | */ | |
172 | ||
173 | #define ATOMIC_INC_NV(name, type) \ | |
174 | type atomic_inc_##name##_nv(volatile type *target) \ | |
175 | { \ | |
176 | type rc; \ | |
177 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
178 | rc = (++(*target)); \ | |
179 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
180 | return (rc); \ | |
181 | } | |
182 | ||
183 | ATOMIC_INC_NV(long, unsigned long) | |
184 | ATOMIC_INC_NV(8, uint8_t) | |
185 | ATOMIC_INC_NV(uchar, uchar_t) | |
186 | ATOMIC_INC_NV(16, uint16_t) | |
187 | ATOMIC_INC_NV(ushort, ushort_t) | |
188 | ATOMIC_INC_NV(32, uint32_t) | |
189 | ATOMIC_INC_NV(uint, uint_t) | |
190 | ATOMIC_INC_NV(ulong, ulong_t) | |
191 | ATOMIC_INC_NV(64, uint64_t) | |
192 | ||
193 | ||
194 | #define ATOMIC_DEC_NV(name, type) \ | |
195 | type atomic_dec_##name##_nv(volatile type *target) \ | |
196 | { \ | |
197 | type rc; \ | |
198 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
199 | rc = (--(*target)); \ | |
200 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
201 | return (rc); \ | |
202 | } | |
203 | ||
204 | ATOMIC_DEC_NV(long, unsigned long) | |
205 | ATOMIC_DEC_NV(8, uint8_t) | |
206 | ATOMIC_DEC_NV(uchar, uchar_t) | |
207 | ATOMIC_DEC_NV(16, uint16_t) | |
208 | ATOMIC_DEC_NV(ushort, ushort_t) | |
209 | ATOMIC_DEC_NV(32, uint32_t) | |
210 | ATOMIC_DEC_NV(uint, uint_t) | |
211 | ATOMIC_DEC_NV(ulong, ulong_t) | |
212 | ATOMIC_DEC_NV(64, uint64_t) | |
213 | ||
214 | ||
215 | #define ATOMIC_ADD_NV(name, type1, type2) \ | |
216 | type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits)\ | |
217 | { \ | |
218 | type1 rc; \ | |
219 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
220 | rc = (*target += bits); \ | |
221 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
222 | return (rc); \ | |
223 | } | |
224 | ||
225 | ATOMIC_ADD_NV(8, uint8_t, int8_t) | |
226 | ATOMIC_ADD_NV(char, uchar_t, signed char) | |
227 | ATOMIC_ADD_NV(16, uint16_t, int16_t) | |
228 | ATOMIC_ADD_NV(short, ushort_t, short) | |
229 | ATOMIC_ADD_NV(32, uint32_t, int32_t) | |
230 | ATOMIC_ADD_NV(int, uint_t, int) | |
231 | ATOMIC_ADD_NV(long, ulong_t, long) | |
232 | ATOMIC_ADD_NV(64, uint64_t, int64_t) | |
233 | ||
234 | void * | |
235 | atomic_add_ptr_nv(volatile void *target, ssize_t bits) | |
236 | { | |
237 | void *ptr; | |
238 | ||
239 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
240 | ptr = (*(caddr_t *)target += bits); | |
241 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
242 | ||
243 | return (ptr); | |
244 | } | |
245 | ||
246 | ||
247 | #define ATOMIC_SUB_NV(name, type1, type2) \ | |
248 | type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\ | |
249 | { \ | |
250 | type1 rc; \ | |
251 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
252 | rc = (*target -= bits); \ | |
253 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
254 | return (rc); \ | |
255 | } | |
256 | ||
257 | ATOMIC_SUB_NV(8, uint8_t, int8_t) | |
258 | ATOMIC_SUB_NV(char, uchar_t, signed char) | |
259 | ATOMIC_SUB_NV(16, uint16_t, int16_t) | |
260 | ATOMIC_SUB_NV(short, ushort_t, short) | |
261 | ATOMIC_SUB_NV(32, uint32_t, int32_t) | |
262 | ATOMIC_SUB_NV(int, uint_t, int) | |
263 | ATOMIC_SUB_NV(long, ulong_t, long) | |
264 | ATOMIC_SUB_NV(64, uint64_t, int64_t) | |
265 | ||
266 | void * | |
267 | atomic_sub_ptr_nv(volatile void *target, ssize_t bits) | |
268 | { | |
269 | void *ptr; | |
270 | ||
271 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
272 | ptr = (*(caddr_t *)target -= bits); | |
273 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
274 | ||
275 | return (ptr); | |
276 | } | |
277 | ||
278 | ||
279 | #define ATOMIC_OR_NV(name, type) \ | |
280 | type atomic_or_##name##_nv(volatile type *target, type bits) \ | |
281 | { \ | |
282 | type rc; \ | |
283 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
284 | rc = (*target |= bits); \ | |
285 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
286 | return (rc); \ | |
287 | } | |
288 | ||
289 | ATOMIC_OR_NV(long, unsigned long) | |
290 | ATOMIC_OR_NV(8, uint8_t) | |
291 | ATOMIC_OR_NV(uchar, uchar_t) | |
292 | ATOMIC_OR_NV(16, uint16_t) | |
293 | ATOMIC_OR_NV(ushort, ushort_t) | |
294 | ATOMIC_OR_NV(32, uint32_t) | |
295 | ATOMIC_OR_NV(uint, uint_t) | |
296 | ATOMIC_OR_NV(ulong, ulong_t) | |
297 | ATOMIC_OR_NV(64, uint64_t) | |
298 | ||
299 | ||
300 | #define ATOMIC_AND_NV(name, type) \ | |
301 | type atomic_and_##name##_nv(volatile type *target, type bits) \ | |
302 | { \ | |
303 | type rc; \ | |
304 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
305 | rc = (*target &= bits); \ | |
306 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
307 | return (rc); \ | |
308 | } | |
309 | ||
310 | ATOMIC_AND_NV(long, unsigned long) | |
311 | ATOMIC_AND_NV(8, uint8_t) | |
312 | ATOMIC_AND_NV(uchar, uchar_t) | |
313 | ATOMIC_AND_NV(16, uint16_t) | |
314 | ATOMIC_AND_NV(ushort, ushort_t) | |
315 | ATOMIC_AND_NV(32, uint32_t) | |
316 | ATOMIC_AND_NV(uint, uint_t) | |
317 | ATOMIC_AND_NV(ulong, ulong_t) | |
318 | ATOMIC_AND_NV(64, uint64_t) | |
319 | ||
320 | ||
321 | /* | |
322 | * If *arg1 == arg2, set *arg1 = arg3; return old value | |
323 | */ | |
324 | ||
325 | #define ATOMIC_CAS(name, type) \ | |
326 | type atomic_cas_##name(volatile type *target, type arg1, type arg2) \ | |
327 | { \ | |
328 | type old; \ | |
329 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
330 | old = *target; \ | |
331 | if (old == arg1) \ | |
332 | *target = arg2; \ | |
333 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
334 | return (old); \ | |
335 | } | |
336 | ||
337 | ATOMIC_CAS(8, uint8_t) | |
338 | ATOMIC_CAS(uchar, uchar_t) | |
339 | ATOMIC_CAS(16, uint16_t) | |
340 | ATOMIC_CAS(ushort, ushort_t) | |
341 | ATOMIC_CAS(32, uint32_t) | |
342 | ATOMIC_CAS(uint, uint_t) | |
343 | ATOMIC_CAS(ulong, ulong_t) | |
344 | ATOMIC_CAS(64, uint64_t) | |
345 | ||
346 | void * | |
347 | atomic_cas_ptr(volatile void *target, void *arg1, void *arg2) | |
348 | { | |
349 | void *old; | |
350 | ||
351 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
352 | old = *(void **)target; | |
353 | if (old == arg1) | |
354 | *(void **)target = arg2; | |
355 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
356 | ||
357 | return (old); | |
358 | } | |
359 | ||
360 | ||
361 | /* | |
362 | * Swap target and return old value | |
363 | */ | |
364 | ||
365 | #define ATOMIC_SWAP(name, type) \ | |
366 | type atomic_swap_##name(volatile type *target, type bits) \ | |
367 | { \ | |
368 | type old; \ | |
369 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ | |
370 | old = *target; \ | |
371 | *target = bits; \ | |
372 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ | |
373 | return (old); \ | |
374 | } | |
375 | ||
376 | ATOMIC_SWAP(8, uint8_t) | |
377 | ATOMIC_SWAP(uchar, uchar_t) | |
378 | ATOMIC_SWAP(16, uint16_t) | |
379 | ATOMIC_SWAP(ushort, ushort_t) | |
380 | ATOMIC_SWAP(32, uint32_t) | |
381 | ATOMIC_SWAP(uint, uint_t) | |
382 | ATOMIC_SWAP(ulong, ulong_t) | |
383 | ATOMIC_SWAP(64, uint64_t) | |
384 | ||
385 | void * | |
386 | atomic_swap_ptr(volatile void *target, void *bits) | |
387 | { | |
388 | void *old; | |
389 | ||
390 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
391 | old = *(void **)target; | |
392 | *(void **)target = bits; | |
393 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
394 | ||
395 | return (old); | |
396 | } | |
397 | ||
398 | ||
399 | int | |
400 | atomic_set_long_excl(volatile ulong_t *target, uint_t value) | |
401 | { | |
402 | ulong_t bit; | |
403 | ||
404 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
405 | bit = (1UL << value); | |
406 | if ((*target & bit) != 0) { | |
407 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
408 | return (-1); | |
409 | } | |
410 | *target |= bit; | |
411 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
412 | ||
413 | return (0); | |
414 | } | |
415 | ||
416 | int | |
417 | atomic_clear_long_excl(volatile ulong_t *target, uint_t value) | |
418 | { | |
419 | ulong_t bit; | |
420 | ||
421 | VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); | |
422 | bit = (1UL << value); | |
423 | if ((*target & bit) != 0) { | |
424 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
425 | return (-1); | |
426 | } | |
427 | *target &= ~bit; | |
428 | VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); | |
429 | ||
430 | return (0); | |
431 | } | |
432 | ||
433 | void | |
434 | membar_enter(void) | |
435 | { | |
436 | /* XXX - Implement me */ | |
437 | } | |
438 | ||
439 | void | |
440 | membar_exit(void) | |
441 | { | |
442 | /* XXX - Implement me */ | |
443 | } | |
444 | ||
445 | void | |
446 | membar_producer(void) | |
447 | { | |
448 | /* XXX - Implement me */ | |
449 | } | |
450 | ||
451 | void | |
452 | membar_consumer(void) | |
453 | { | |
454 | /* XXX - Implement me */ | |
455 | } | |
456 | ||
/* Legacy kernel interfaces; they will go away (eventually). */

/*
 * Legacy alias for atomic_cas_8().
 */
uint8_t
cas8(uint8_t *target, uint8_t arg1, uint8_t arg2)
{
	uint8_t prev;

	prev = atomic_cas_8(target, arg1, arg2);
	return (prev);
}
464 | ||
/*
 * Legacy alias for atomic_cas_32().
 */
uint32_t
cas32(uint32_t *target, uint32_t arg1, uint32_t arg2)
{
	uint32_t prev;

	prev = atomic_cas_32(target, arg1, arg2);
	return (prev);
}
470 | ||
/*
 * Legacy alias for atomic_cas_64().
 */
uint64_t
cas64(uint64_t *target, uint64_t arg1, uint64_t arg2)
{
	uint64_t prev;

	prev = atomic_cas_64(target, arg1, arg2);
	return (prev);
}
476 | ||
477 | ulong_t | |
478 | caslong(ulong_t *target, ulong_t arg1, ulong_t arg2) | |
479 | { | |
480 | return (atomic_cas_ulong(target, arg1, arg2)); | |
481 | } | |
482 | ||
/*
 * Legacy alias for atomic_cas_ptr().
 */
void *
casptr(void *target, void *arg1, void *arg2)
{
	void *prev;

	prev = atomic_cas_ptr(target, arg1, arg2);
	return (prev);
}
488 | ||
489 | void | |
490 | atomic_and_long(ulong_t *target, ulong_t bits) | |
491 | { | |
492 | return (atomic_and_ulong(target, bits)); | |
493 | } | |
494 | ||
495 | void | |
496 | atomic_or_long(ulong_t *target, ulong_t bits) | |
497 | { | |
498 | return (atomic_or_ulong(target, bits)); | |
499 | } |