#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point that is a no-op on 64-bit or UP kernels.
 *
 * Key points:
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable():
 *    3.1) The write side must not sleep.
 *    3.2) The write side must not allow preemption.
 *    3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    set of values is consistent (remember point 1: this is a no-op on
 *    64-bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better to
 *    disable preemption/interrupts to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq users, readers can use the
 *    u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already held, or
 * per-cpu data is used in a non-preemptible context):
 *
 * spin_lock_bh(...) or other synchronization to get exclusive access
 * ...
 * u64_stats_update_begin(&stats->syncp);
 * stats->bytes64 += len;	// non atomic operation
 * stats->packets64++;		// non atomic operation
 * u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no guarantee across several of them):
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = stats->bytes64;	// non atomic operation
 *         tpackets = stats->packets64;	// non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * Example of use in drivers/net/loopback.c, using per-cpu containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};


#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
# define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
#else
# define u64_stats_init(syncp)	do { } while (0)
#endif

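/*
 * Illustrative sketch (the struct and names below are hypothetical, not
 * part of this header): embed a u64_stats_sync next to the 64-bit counters
 * it protects, and initialize it before first use.
 *
 *	struct pcpu_stats {
 *		u64			rx_packets;
 *		u64			rx_bytes;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	static void pcpu_stats_setup(struct pcpu_stats *stats)
 *	{
 *		memset(stats, 0, sizeof(*stats));
 *		u64_stats_init(&stats->syncp);	// no-op unless 32bit SMP
 *	}
 */
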
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
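
/*
 * Writer-side sketch (using the hypothetical pcpu_stats struct from the
 * example above): the caller already has exclusive access because the
 * per-cpu counters are only updated from BH context on the local cpu.
 *
 *	static void pcpu_stats_add(struct pcpu_stats *stats, unsigned int len)
 *	{
 *		u64_stats_update_begin(&stats->syncp);
 *		stats->rx_bytes += len;		// non atomic operation
 *		stats->rx_packets++;		// non atomic operation
 *		u64_stats_update_end(&stats->syncp);
 *	}
 */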

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG == 32
	preempt_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG == 32
	preempt_enable();
#endif
	return false;
#endif
}
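
/*
 * Reader-side sketch (hypothetical names again): sum per-cpu counters into
 * one consistent pair per cpu. Each per-cpu snapshot is retried until its
 * seqcount is stable; on 64bit the loop body runs exactly once.
 *
 *	static void pcpu_stats_read(struct pcpu_stats __percpu *stats,
 *				    u64 *packets, u64 *bytes)
 *	{
 *		int cpu;
 *
 *		*packets = *bytes = 0;
 *		for_each_possible_cpu(cpu) {
 *			const struct pcpu_stats *p = per_cpu_ptr(stats, cpu);
 *			u64 tpackets, tbytes;
 *			unsigned int start;
 *
 *			do {
 *				start = u64_stats_fetch_begin(&p->syncp);
 *				tpackets = p->rx_packets;
 *				tbytes = p->rx_bytes;
 *			} while (u64_stats_fetch_retry(&p->syncp, start));
 *
 *			*packets += tpackets;
 *			*bytes += tbytes;
 *		}
 *	}
 */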

/*
 * If irq handlers can update the u64 counters, readers can use the
 * following helpers:
 * - 32-bit SMP arches use seqcount protection, which is irq safe.
 * - 32-bit UP must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG == 32
	local_irq_disable();
#endif
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG == 32
	local_irq_enable();
#endif
	return false;
#endif
}
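
/*
 * Sketch of the irq-safe variant (hypothetical names, same per-cpu loop as
 * in the reader example above), for counters that may be updated from
 * hardirq or softirq context. On 32bit UP this disables irqs around the
 * reads instead of relying on a seqcount.
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&p->syncp);
 *		tpackets = p->rx_packets;
 *		tbytes = p->rx_bytes;
 *	} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 */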

#endif /* _LINUX_U64_STATS_SYNC_H */