]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_PERCPU_COUNTER_H |
2 | #define _LINUX_PERCPU_COUNTER_H | |
3 | /* | |
4 | * A simple "approximate counter" for use in ext2 and ext3 superblocks. | |
5 | * | |
6 | * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. | |
7 | */ | |
8 | ||
1da177e4 LT |
9 | #include <linux/spinlock.h> |
10 | #include <linux/smp.h> | |
c67ad917 | 11 | #include <linux/list.h> |
1da177e4 LT |
12 | #include <linux/threads.h> |
13 | #include <linux/percpu.h> | |
0216bfcf | 14 | #include <linux/types.h> |
1da177e4 LT |
15 | |
16 | #ifdef CONFIG_SMP | |
17 | ||
18 | struct percpu_counter { | |
19 | spinlock_t lock; | |
0216bfcf | 20 | s64 count; |
c67ad917 AM |
21 | #ifdef CONFIG_HOTPLUG_CPU |
22 | struct list_head list; /* All percpu_counters are on a list */ | |
23 | #endif | |
0216bfcf | 24 | s32 *counters; |
1da177e4 LT |
25 | }; |
26 | ||
27 | #if NR_CPUS >= 16 | |
28 | #define FBC_BATCH (NR_CPUS*2) | |
29 | #else | |
30 | #define FBC_BATCH (NR_CPUS*4) | |
31 | #endif | |
32 | ||
833f4077 | 33 | int percpu_counter_init(struct percpu_counter *fbc, s64 amount); |
dc62a30e | 34 | int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); |
c67ad917 | 35 | void percpu_counter_destroy(struct percpu_counter *fbc); |
3a587f47 | 36 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
20e89767 | 37 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
e8ced39d | 38 | s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); |
1da177e4 | 39 | |
20e89767 | 40 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
252e0ba6 PZ |
41 | { |
42 | __percpu_counter_add(fbc, amount, FBC_BATCH); | |
43 | } | |
44 | ||
bf1d89c8 PZ |
45 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) |
46 | { | |
e8ced39d | 47 | s64 ret = __percpu_counter_sum(fbc, 0); |
bf1d89c8 PZ |
48 | return ret < 0 ? 0 : ret; |
49 | } | |
50 | ||
e8ced39d MC |
51 | static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) |
52 | { | |
53 | return __percpu_counter_sum(fbc, 1); | |
54 | } | |
55 | ||
56 | ||
bf1d89c8 PZ |
57 | static inline s64 percpu_counter_sum(struct percpu_counter *fbc) |
58 | { | |
e8ced39d | 59 | return __percpu_counter_sum(fbc, 0); |
bf1d89c8 PZ |
60 | } |
61 | ||
0216bfcf | 62 | static inline s64 percpu_counter_read(struct percpu_counter *fbc) |
1da177e4 LT |
63 | { |
64 | return fbc->count; | |
65 | } | |
66 | ||
67 | /* | |
68 | * It is possible for the percpu_counter_read() to return a small negative | |
69 | * number for some counter which should never be negative. | |
0216bfcf | 70 | * |
1da177e4 | 71 | */ |
0216bfcf | 72 | static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) |
1da177e4 | 73 | { |
0216bfcf | 74 | s64 ret = fbc->count; |
1da177e4 LT |
75 | |
76 | barrier(); /* Prevent reloads of fbc->count */ | |
0216bfcf | 77 | if (ret >= 0) |
1da177e4 LT |
78 | return ret; |
79 | return 1; | |
80 | } | |
81 | ||
82 | #else | |
83 | ||
84 | struct percpu_counter { | |
0216bfcf | 85 | s64 count; |
1da177e4 LT |
86 | }; |
87 | ||
833f4077 | 88 | static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) |
1da177e4 | 89 | { |
0216bfcf | 90 | fbc->count = amount; |
833f4077 | 91 | return 0; |
1da177e4 LT |
92 | } |
93 | ||
dc62a30e PZ |
94 | #define percpu_counter_init_irq percpu_counter_init |
95 | ||
/* UP: nothing was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
99 | ||
3a587f47 PZ |
100 | static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) |
101 | { | |
102 | fbc->count = amount; | |
103 | } | |
104 | ||
/* UP: the batch size is irrelevant; forward to percpu_counter_add(). */
#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)
107 | ||
1da177e4 | 108 | static inline void |
20e89767 | 109 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
1da177e4 LT |
110 | { |
111 | preempt_disable(); | |
112 | fbc->count += amount; | |
113 | preempt_enable(); | |
114 | } | |
115 | ||
0216bfcf | 116 | static inline s64 percpu_counter_read(struct percpu_counter *fbc) |
1da177e4 LT |
117 | { |
118 | return fbc->count; | |
119 | } | |
120 | ||
0216bfcf | 121 | static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) |
1da177e4 LT |
122 | { |
123 | return fbc->count; | |
124 | } | |
125 | ||
52d9f3b4 | 126 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) |
e2bab3d9 AM |
127 | { |
128 | return percpu_counter_read_positive(fbc); | |
129 | } | |
130 | ||
bf1d89c8 PZ |
131 | static inline s64 percpu_counter_sum(struct percpu_counter *fbc) |
132 | { | |
133 | return percpu_counter_read(fbc); | |
134 | } | |
135 | ||
1da177e4 LT |
136 | #endif /* CONFIG_SMP */ |
137 | ||
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
142 | ||
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
147 | ||
3cb4f9fa PZ |
148 | static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) |
149 | { | |
150 | percpu_counter_add(fbc, -amount); | |
151 | } | |
152 | ||
1da177e4 | 153 | #endif /* _LINUX_PERCPU_COUNTER_H */ |