/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	/* Fold the variable's cacheline number down to a lock index. */
	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
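
/*
 * Illustrative sketch (editor's note, not part of the kernel API):
 * the hash above maps any given atomic64_t to one fixed lock, which
 * is what makes the operations below mutually exclusive, while
 * variables in different cachelines tend to spread across locks:
 *
 *	static atomic64_t ctr;			// hypothetical counter
 *	raw_spinlock_t *l1 = lock_addr(&ctr);
 *	raw_spinlock_t *l2 = lock_addr(&ctr);
 *	// l1 == l2 always holds for the same variable
 */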
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
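
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): because read and set both take the variable's hashed lock,
 * a reader never observes a torn 64-bit value, even on 32-bit CPUs:
 *
 *	static atomic64_t bytes_sent;		// hypothetical counter
 *
 *	atomic64_set(&bytes_sent, 0);
 *	long long now = atomic64_read(&bytes_sent);
 */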
#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
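
/*
 * For reference, a hand-written sketch (editor's note) of roughly what
 * ATOMIC64_OPS(add, +=) generates via the two macros above:
 *
 *	void atomic64_add(long long a, atomic64_t *v)
 *	{
 *		... lock; v->counter += a; unlock ...
 *	}
 *	long long atomic64_add_return(long long a, atomic64_t *v)
 *	{
 *		... lock; val = (v->counter += a); unlock; return val ...
 *	}
 */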
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	/* Only store the decremented value if it did not go negative. */
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
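
/*
 * Usage sketch (hypothetical caller): the decrement only lands when
 * the result stays non-negative, which suits refcount-style teardown
 * where zero must be crossed exactly once:
 *
 *	if (atomic64_dec_if_positive(&obj_refs) == 0)
 *		;	// last reference dropped; tear down here
 */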
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
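
/*
 * Illustrative sketch (assumed caller, not in this file): the usual
 * compare-and-swap retry loop built on atomic64_cmpxchg(), here
 * doubling some atomic64_t v without the caller taking any lock:
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = old * 2;
 *	} while (atomic64_cmpxchg(&v, old, new) != old);
 */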
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	/* Add only if the counter does not hold the excluded value u. */
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
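
/*
 * Usage note (sketch, assuming the standard atomic64 wrappers): the
 * common atomic64_inc_not_zero() idiom is this function with a == 1
 * and u == 0:
 *
 *	if (atomic64_add_unless(&v, 1, 0))
 *		;	// got a reference; v was non-zero
 */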