/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/atomic.h>
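/*
 * This file is built when an architecture selects CONFIG_GENERIC_ATOMIC64;
 * the matching declarations live in <asm-generic/atomic64.h>.  A minimal
 * usage sketch (the counter name is illustrative):
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_done);
 *	total = atomic64_read(&bytes_done);
 */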
/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16
/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
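/*
 * Hash an atomic64_t's address to one of the locks above.  The address
 * is shifted down by L1_CACHE_SHIFT first so that variables sharing a
 * cacheline (which would contend anyway) map to the same lock, then the
 * higher-order bits are folded in to spread distinct cachelines across
 * the array.
 */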
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
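/*
 * Even a plain read must take the lock: without 64-bit atomic
 * instructions a 64-bit load is two separate accesses, so an unlocked
 * read could observe a torn value from a concurrent update.
 */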
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
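/*
 * The *_return variants return the new value.  The update and the read
 * of the result happen under the same lock, so the pair is atomic with
 * respect to all other operations on this variable.
 */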
long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);
void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
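/*
 * Decrement @v only if the result would stay non-negative.  Returns the
 * old value minus one in either case, so the caller can tell from a
 * negative return that no decrement took place.
 */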
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
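/*
 * Compare-and-exchange: store @n only if @v currently holds @o.  The
 * old value is returned either way, so callers can detect success by
 * comparing the result against @o.
 */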
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
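/*
 * Add @a to @v unless @v currently holds @u.  Returns non-zero if the
 * addition was performed; the classic use is atomic64_add_unless(v, 1, 0)
 * for refcount-style "increment unless already zero".
 */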
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
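/*
 * The lock array must be initialized before anything can use 64-bit
 * atomics; pure_initcall() runs this at the earliest initcall level,
 * ahead of the other initcall classes.
 */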
static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		raw_spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);