#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point, which is a no-op on 64-bit or UP kernels.
 *
 * Key points:
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spinlock_bh() or disable_bh():
 *    3.1) The write side should not sleep.
 *    3.2) The write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent as a whole (remember point 1: this is a no-op on
 *    64-bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better not to
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used [in a non-preemptible context]):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   stats->bytes64 += len;   // non atomic operation
 *   stats->packets64++;      // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 *   u64 tbytes, tpackets;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin(&stats->syncp);
 *           tbytes = stats->bytes64;     // non atomic operation
 *           tpackets = stats->packets64; // non atomic operation
 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example using per_cpu containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};
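
/*
 * A rough sketch of the per-cpu container pattern used by e.g. loopback
 * (the struct and field names here are illustrative, not part of this API):
 * embed a u64_stats_sync next to the counters it protects.
 *
 *   struct pcpu_lstats {
 *           u64                     bytes64;
 *           u64                     packets64;
 *           struct u64_stats_sync   syncp;
 *   };
 *
 * Each CPU updates its own instance with BH (or preemption) disabled, so
 * the writer-side exclusion rule of point 3 holds without extra locking.
 */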

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}
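
/*
 * A minimal initialization sketch, reusing the illustrative pcpu_lstats
 * container above; my_dev_init() is hypothetical:
 *
 *   static int my_dev_init(struct net_device *dev)
 *   {
 *           int cpu;
 *
 *           dev->lstats = alloc_percpu(struct pcpu_lstats);
 *           if (!dev->lstats)
 *                   return -ENOMEM;
 *
 *           for_each_possible_cpu(cpu)
 *                   u64_stats_init(&per_cpu_ptr(dev->lstats, cpu)->syncp);
 *           return 0;
 *   }
 */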

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
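
/*
 * The _raw variants below use raw_write_seqcount_begin()/end(), which skip
 * the lockdep annotations of the regular write_seqcount helpers; the caller
 * remains fully responsible for write-side mutual exclusion.
 */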
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

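/*
 * On 32-bit UP there is no seqcount and a 64-bit load is done in two halves,
 * so u64_stats_fetch_begin() disables preemption to keep another task's
 * writer from slipping in between them. This does not protect against
 * interrupt writers: see point 6 and the _irq helpers below.
 */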
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
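
/*
 * A reader-side sketch of the common pattern of summing per-cpu counters,
 * reusing the illustrative pcpu_lstats container above (my_get_stats64()
 * is hypothetical):
 *
 *   static void my_get_stats64(struct net_device *dev,
 *                              struct rtnl_link_stats64 *stats)
 *   {
 *           int cpu;
 *
 *           for_each_possible_cpu(cpu) {
 *                   const struct pcpu_lstats *l = per_cpu_ptr(dev->lstats, cpu);
 *                   u64 tbytes, tpackets;
 *                   unsigned int start;
 *
 *                   do {
 *                           start = u64_stats_fetch_begin(&l->syncp);
 *                           tbytes = l->bytes64;
 *                           tpackets = l->packets64;
 *                   } while (u64_stats_fetch_retry(&l->syncp, start));
 *
 *                   stats->rx_bytes += tbytes;
 *                   stats->rx_packets += tpackets;
 *           }
 *   }
 *
 * Each per-cpu snapshot is consistent, but the total is only a best-effort
 * sum, per point 4 above.
 */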

/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers:
 * - SMP 32-bit arches use seqcount protection, irq safe.
 * - UP 32-bit must disable irqs.
 * - 64-bit arches have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
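
/*
 * A minimal usage sketch for the irq-safe reader helpers, assuming a counter
 * that an irq handler may update (the stats pointer is illustrative):
 *
 *   unsigned int start;
 *   u64 tbytes;
 *
 *   do {
 *           start = u64_stats_fetch_begin_irq(&stats->syncp);
 *           tbytes = stats->bytes64;
 *   } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */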

#endif /* _LINUX_U64_STATS_SYNC_H */