/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point, which is a noop on 64-bit or UP kernels.
 *
 * Key points:
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a noop on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable():
 *    3.1) The write side should not sleep.
 *    3.2) The write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    whole set of values is consistent (remember point 1: this is a noop on
 *    64-bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better to not
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used [in a non-preemptible context]):
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->bytes64 += len;	// non-atomic operation
 *	stats->packets64++;	// non-atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use the following template to get a
 * consistent snapshot of each variable (but no guarantee across several ones):
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = stats->bytes64;	// non-atomic operation
 *		tpackets = stats->packets64;	// non-atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * See drivers/net/loopback.c for an example of use, with per_cpu containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

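/*
 * Example (a sketch; the struct and field names are hypothetical): a driver
 * typically embeds the sync point next to the counters it protects, and
 * initializes every per-cpu instance once, at allocation time:
 *
 *	struct pcpu_stats {
 *		u64			bytes64;
 *		u64			packets64;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		u64_stats_init(&per_cpu_ptr(dev->pcpu_stats, cpu)->syncp);
 */
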
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

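/*
 * Sketch of a per-cpu writer running in a BH-disabled context, in the spirit
 * of drivers/net/loopback.c (field names as in the hypothetical struct
 * above). Per-cpu data plus the non-preemptible context provides the
 * required writer-side exclusion:
 *
 *	struct pcpu_stats *stats = this_cpu_ptr(dev->pcpu_stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->bytes64 += len;
 *	stats->packets64++;
 *	u64_stats_update_end(&stats->syncp);
 */
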
6d586993 ED |
93 | static inline unsigned long |
94 | u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) | |
95 | { | |
96 | unsigned long flags = 0; | |
97 | ||
98 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) | |
99 | local_irq_save(flags); | |
100 | write_seqcount_begin(&syncp->seq); | |
101 | #endif | |
102 | return flags; | |
103 | } | |
104 | ||
105 | static inline void | |
106 | u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, | |
107 | unsigned long flags) | |
108 | { | |
109 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) | |
110 | write_seqcount_end(&syncp->seq); | |
111 | local_irq_restore(flags); | |
112 | #endif | |
113 | } | |
114 | ||
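/*
 * Sketch of a writer that may also run from hardirq context (field names are
 * hypothetical). The irqsave variant keeps a 32-bit SMP writer from being
 * interrupted in the middle of a seqcount update; mutual exclusion between
 * CPUs must still be ensured, e.g. by using per-cpu counters:
 *
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *	stats->bytes64 += len;
 *	stats->packets64++;
 *	u64_stats_update_end_irqrestore(&stats->syncp, flags);
 */
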
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	raw_write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	raw_write_seqcount_end(&syncp->seq);
#endif
}

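/*
 * Note: the _raw variants use raw_write_seqcount_begin()/_end(), i.e. the
 * same seqcount update but without the lockdep instrumentation of the plain
 * helpers, for call sites where that instrumentation is unwanted.
 */
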
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

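/*
 * Sketch of a reader aggregating per-cpu counters, in the spirit of
 * drivers/net/loopback.c (struct and field names as in the hypothetical
 * example above). Each per-cpu snapshot is internally consistent, while
 * the sums may mix values from slightly different times:
 *
 *	u64 bytes = 0, packets = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct pcpu_stats *p = per_cpu_ptr(dev->pcpu_stats, cpu);
 *		u64 tbytes, tpackets;
 *		unsigned int start;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&p->syncp);
 *			tbytes = p->bytes64;
 *			tpackets = p->packets64;
 *		} while (u64_stats_fetch_retry(&p->syncp, start));
 *
 *		bytes += tbytes;
 *		packets += tpackets;
 *	}
 */
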
/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers:
 * - SMP 32-bit arches use seqcount protection, irq safe.
 * - UP 32-bit must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
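
/*
 * Sketch of a reader for counters that an irq handler may update (field
 * names are hypothetical); same retry loop as above, using the _irq helpers:
 *
 *	unsigned int start;
 *	u64 tbytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tbytes = stats->bytes64;
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */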

#endif /* _LINUX_U64_STATS_SYNC_H */