/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n" /* load word, set link flag */ \
	"	" #asm_op " %0, %0, %2	\n" /* e.g. add %0, %0, %2 */	\
	"	scond   %0, [%1]	\n" /* store iff link still set */ \
	"	bnz     1b		\n" /* scond failed: retry */	\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	return temp;							\
}

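/*
 * Illustrative sketch (not part of the original header): the LLOCK/SCOND
 * loop above makes the whole read-modify-write atomic. In pseudo-C:
 *
 *	do {
 *		temp = v->counter;	// llock: load + set link flag
 *		temp = temp <op> i;	// add/sub/...
 *	} while (!store_conditional(&v->counter, temp));  // scond + bnz
 *
 * store_conditional() is a hypothetical helper standing in for SCOND; it
 * fails (and the loop retries) if another agent touched v->counter since
 * the LLOCK.
 */
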
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
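
/*
 * Illustrative race (not part of the original header): if atomic_set()
 * skipped the lock, its store could land inside an emulated RMW:
 *
 *	CPU0: atomic_ops_lock(flags);
 *	CPU0: temp = v->counter;
 *	CPU1: v->counter = i;		// hypothetical unlocked atomic_set()
 *	CPU0: v->counter = temp + 1;	// CPU1's store is silently lost
 *	CPU0: atomic_ops_unlock(flags);
 *
 * Taking atomic_ops_lock() in atomic_set() serializes it against such
 * sequences.
 */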

#endif

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking is irq-disabling (UP) or a spinlock (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

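/*
 * For illustration (not part of the original header): on an LLSC build,
 * ATOMIC_OPS(add, +=, add) above expands to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %0, [%1]	\n"
 *		"	add     %0, %0, %2	\n"
 *		"	scond   %0, [%1]	\n"
 *		"	bnz     1b		\n"
 *		: "=&r"(temp)
 *		: "r"(&v->counter), "ir"(i)
 *		: "cc");
 *	}
 *
 * plus the matching atomic_add_return(); the !LLSC variants perform the
 * same C-level ops under atomic_ops_lock()/atomic_ops_unlock().
 */
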
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
	c;								\
})

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
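
/*
 * Usage sketch (illustrative, not part of the original header):
 * atomic_add_unless() is the <linux/atomic.h> wrapper around the
 * __atomic_add_unless() cmpxchg loop above, and atomic_inc_not_zero()
 * is the classic conditional refcount grab built on it:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt)) {
 *		// refcnt was non-zero, so obj can't be freed under us
 *		...
 *		atomic_dec(&obj->refcnt);
 *	}
 *
 * 'obj' and 'refcnt' are hypothetical names used only for illustration.
 */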

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */