/*
 * include/linux/percpu_counter.h
 * (mirrored from git.proxmox.com, mirror_ubuntu-artful-kernel.git)
 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;		/* lock; presumably serializes folding of
					 * per-CPU deltas into ->count — see
					 * lib/percpu_counter.c to confirm */
	s64 count;			/* central (approximate) value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;		/* per-CPU deltas; NULL until the
					 * counter has been initialized (see
					 * percpu_counter_initialized()) */
};

/* Default per-CPU batch size (used by percpu_counter_add() and friends). */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
		struct lock_class_key *key);

/*
 * Each percpu_counter_init() call site gets its own static
 * lock_class_key, so lockdep can distinguish the counters' locks.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
		s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

/* Three-way compare against @rhs using the default global batch size. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

20e89767 52static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
252e0ba6 53{
104b4e51 54 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
252e0ba6
PZ
55}
56
bf1d89c8
PZ
57static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
58{
02d21168 59 s64 ret = __percpu_counter_sum(fbc);
bf1d89c8
PZ
60 return ret < 0 ? 0 : ret;
61}
62
63static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
64{
02d21168 65 return __percpu_counter_sum(fbc);
bf1d89c8
PZ
66}
67
/*
 * Fast but approximate read: returns only the central ->count; pending
 * per-CPU deltas are not folded in.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative; such reads are
 * clamped to zero here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Snapshot once; the test and the return must see the same value. */
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

7f93cff9
TT
88static inline int percpu_counter_initialized(struct percpu_counter *fbc)
89{
90 return (fbc->counters != NULL);
91}
92
#else /* !CONFIG_SMP */

/* UP: a bare s64 is already exact; no lock and no per-CPU state. */
struct percpu_counter {
	s64 count;
};

/*
 * UP: initialization cannot fail; @gfp is unused since nothing is
 * allocated.  Returns 0 to match the SMP variant's contract.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

/* UP: nothing was allocated, so there is nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

/* UP: setting the counter is a plain store. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

27f5e0f6
TC
115static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
116{
117 if (fbc->count > rhs)
118 return 1;
119 else if (fbc->count < rhs)
120 return -1;
121 else
122 return 0;
123}
124
/* UP: there is no batching, so @batch is ignored; plain compare. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

/*
 * UP: wrap the update in preempt_disable()/preempt_enable() —
 * NOTE(review): presumably so a preempting updater cannot interleave
 * with the non-atomic s64 read-modify-write; keep the pair around the
 * update.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* UP: no batching exists; @batch is ignored and the add is direct. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

/* UP: reads are exact; just return the stored value. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative, so no clamping is done here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

/* UP: the plain positive read is already exact, so "sum" is just a read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

/* UP: the plain read is already exact. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

/* UP: nothing is allocated, so the counter is always usable. */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

/* Increment the counter by one (default batch on SMP). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

/* Decrement the counter by one (default batch on SMP). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

/*
 * Subtract @amount from the counter.
 * NOTE(review): negating S64_MIN is undefined behavior; callers are
 * assumed never to pass it.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

1da177e4 191#endif /* _LINUX_PERCPU_COUNTER_H */