]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - include/linux/percpu_counter.h
mm/hotplug: invalid PFNs from pfn_to_online_page()
[mirror_ubuntu-bionic-kernel.git] / include / linux / percpu_counter.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_PERCPU_COUNTER_H
3#define _LINUX_PERCPU_COUNTER_H
4/*
5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 *
7 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 */
9
1da177e4
LT
10#include <linux/spinlock.h>
11#include <linux/smp.h>
c67ad917 12#include <linux/list.h>
1da177e4
LT
13#include <linux/threads.h>
14#include <linux/percpu.h>
0216bfcf 15#include <linux/types.h>
908c7f19 16#include <linux/gfp.h>
1da177e4
LT
17
18#ifdef CONFIG_SMP
19
/*
 * SMP flavour: an approximate s64 counter.  Small per-CPU deltas accumulate
 * in *counters and are folded into ->count once they exceed the batch size
 * (see percpu_counter_add_batch() in lib/percpu_counter.c).
 */
struct percpu_counter {
	raw_spinlock_t lock;	/* presumably serializes folding of per-CPU
				 * deltas into ->count — confirm against
				 * lib/percpu_counter.c */
	s64 count;		/* global (approximate) aggregate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas; NULL until init */
};
28
/* Threshold at which a per-CPU delta is folded into ->count. */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/*
 * Wrapper around __percpu_counter_init() that supplies a static lockdep
 * class key, so each percpu_counter call site gets its own lock class.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
/* Accurate sum: folds all per-CPU deltas into the returned value. */
s64 __percpu_counter_sum(struct percpu_counter *fbc);
/* Three-way compare against rhs, accurate to within the given batch. */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
48static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
49{
50 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
51}
1da177e4 52
20e89767 53static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
252e0ba6 54{
104b4e51 55 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
252e0ba6
PZ
56}
57
bf1d89c8
PZ
58static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
59{
02d21168 60 s64 ret = __percpu_counter_sum(fbc);
bf1d89c8
PZ
61 return ret < 0 ? 0 : ret;
62}
63
64static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
65{
02d21168 66 return __percpu_counter_sum(fbc);
bf1d89c8
PZ
67}
68
0216bfcf 69static inline s64 percpu_counter_read(struct percpu_counter *fbc)
1da177e4
LT
70{
71 return fbc->count;
72}
73
/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 *
 * This variant clamps such transient negatives to zero.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Snapshot ->count exactly once; the test below must use this copy. */
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
88
7f93cff9
TT
89static inline int percpu_counter_initialized(struct percpu_counter *fbc)
90{
91 return (fbc->counters != NULL);
92}
93
7fa4cf92 94#else /* !CONFIG_SMP */
1da177e4
LT
95
/* UP flavour: a plain s64; no per-CPU state, no lock needed. */
struct percpu_counter {
	s64 count;	/* exact value on UP */
};
99
908c7f19
TH
100static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
101 gfp_t gfp)
1da177e4 102{
0216bfcf 103 fbc->count = amount;
833f4077 104 return 0;
1da177e4
LT
105}
106
/* UP: nothing was allocated at init time, so nothing to tear down. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
110
3a587f47
PZ
111static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
112{
113 fbc->count = amount;
114}
115
27f5e0f6
TC
116static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
117{
118 if (fbc->count > rhs)
119 return 1;
120 else if (fbc->count < rhs)
121 return -1;
122 else
123 return 0;
124}
125
80188b0d
DC
126static inline int
127__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
128{
129 return percpu_counter_compare(fbc, rhs);
130}
131
/*
 * UP add: bracket the read-modify-write of ->count with preemption
 * disabled so a preempting context on the same CPU cannot interleave
 * its own update and lose one of them.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
139
0c9cf2ef 140static inline void
104b4e51 141percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
0c9cf2ef
AB
142{
143 percpu_counter_add(fbc, amount);
144}
145
0216bfcf 146static inline s64 percpu_counter_read(struct percpu_counter *fbc)
1da177e4
LT
147{
148 return fbc->count;
149}
150
c84598bb
SL
151/*
152 * percpu_counter is intended to track positive numbers. In the UP case the
153 * number should never be negative.
154 */
0216bfcf 155static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
1da177e4
LT
156{
157 return fbc->count;
158}
159
52d9f3b4 160static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
e2bab3d9
AM
161{
162 return percpu_counter_read_positive(fbc);
163}
164
bf1d89c8
PZ
165static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
166{
167 return percpu_counter_read(fbc);
168}
169
/* UP: initialization cannot fail and allocates nothing — always ready. */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}
174
1da177e4
LT
175#endif /* CONFIG_SMP */
176
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
181
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
186
3cb4f9fa
PZ
187static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
188{
189 percpu_counter_add(fbc, -amount);
190}
191
1da177e4 192#endif /* _LINUX_PERCPU_COUNTER_H */