/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points :
 *
 * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
 * - The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context.
 *
 * 4) If reader fetches several counters, there is no guarantee the whole values
 *    are consistent w.r.t. each other (remember point #2: seqcounts are not
 *    used for 64bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
 *    might be updated from a hardirq or softirq context (remember point #1:
 *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
 *    corrupted 64-bit values otherwise.
 *
 * Usage :
 *
 * Stats producer (writer) should use following template granted it already got
 * an exclusive access to counters (a lock is already taken, or per cpu
 * data is used [in a non preemptable context])
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len); // non atomic operation
 *   u64_stats_inc(&stats->packets64);    // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use following template to get consistent
 * snapshot for each variable (but no guarantee on several ones)
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = u64_stats_read(&stats->bytes64);     // non atomic operation
 *         tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * Example of use in drivers/net/loopback.c, using per_cpu containers,
 * in BH disabled context.
 */
#include <linux/seqlock.h>

/*
 * Synchronization state for a group of u64 statistics.
 *
 * Only 32-bit SMP kernels need real state: a seqcount bumped around each
 * writer-side update so readers can detect torn 64-bit reads.  Everywhere
 * else the struct is empty and the begin/end helpers below compile down
 * to (almost) nothing.
 */
struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};
#if BITS_PER_LONG == 64
#include <asm/local64.h>

/*
 * 64-bit kernels: a native 64-bit access cannot tear, so the counter is
 * just a local64_t and each helper maps to a plain local64 operation.
 */
typedef struct {
	local64_t	v;
} u64_stats_t;

/* Fetch the current counter value. */
static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

/* Add @val to the counter. */
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

/* Increment the counter by one. */
static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

/*
 * 32-bit kernels: the counter is a bare u64.  Updates are NOT atomic and
 * rely on the surrounding u64_stats_sync protocol (seqcount on SMP,
 * preemption/irq disabling on UP) for consistency.
 */
typedef struct {
	u64		v;
} u64_stats_t;

/* Fetch the counter value; may tear unless bracketed by fetch_begin/retry. */
static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

/* Add @val to the counter (non-atomic read-modify-write). */
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

/* Increment the counter by one (non-atomic read-modify-write). */
static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
/* 32-bit SMP: the embedded seqcount must be initialized before first use. */
#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
#else
/* No seqcount compiled in: initialization is a no-op. */
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
}
#endif
/*
 * Mark the start of a writer-side update section.  The caller must
 * already hold exclusive access to the counters and run non-preemptibly.
 * Only 32-bit SMP enters the seqcount write section; a no-op elsewhere.
 */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

/*
 * Mark the end of a writer-side update section opened with
 * u64_stats_update_begin().  Only 32-bit SMP leaves the seqcount write
 * section; a no-op elsewhere.
 */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

/*
 * Writer-side begin that is safe when writers or readers can also run in
 * IRQ context: on 32-bit SMP local interrupts are disabled before
 * entering the seqcount write section, and the saved flags are returned
 * for u64_stats_update_end_irqrestore().  Elsewhere this is a no-op and
 * returns 0.
 */
static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

/*
 * Writer-side end matching u64_stats_update_begin_irqsave(): leaves the
 * seqcount write section, then restores the interrupt flags saved at
 * begin time.  A no-op except on 32-bit SMP.
 */
static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}

/*
 * Core reader-side begin: snapshot the seqcount so the caller can later
 * detect a concurrent writer via __u64_stats_fetch_retry().  Returns 0
 * when no seqcount is compiled in (64-bit, or 32-bit UP).
 */
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

/*
 * Reader-side begin.  On 32-bit UP there is no seqcount, so preemption
 * is disabled instead, keeping a preemptible-context writer on this CPU
 * from interleaving with the read.  Balanced by u64_stats_fetch_retry().
 */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

/*
 * Core reader-side end: returns true if a writer touched the stats since
 * the matching fetch_begin (seqcount changed), i.e. the caller must loop
 * and re-read.  Always false when no seqcount is compiled in.
 */
static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

/*
 * Reader-side end paired with u64_stats_fetch_begin(): re-enables
 * preemption on 32-bit UP, then reports whether the read raced with a
 * writer and must be retried.
 */
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
 * In case irq handlers can update u64 counters, readers can use following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable irqs.
 * - 64bit have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	/* No seqcount on 32-bit UP: block irq-context writers for the read. */
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

/*
 * Irq-safe counterpart of u64_stats_fetch_retry(), pairing with
 * u64_stats_fetch_begin_irq(): re-enables local irqs on 32-bit UP, then
 * reports whether the read must be retried.
 */
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */