#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
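
/*
 * Illustrative sketch (hypothetical "foo_seq"/"foo_data", not part of this
 * header): a caller of __read_seqcount_begin() must supply the read barrier
 * itself before loading the protected data:
 *
 *	seq = __read_seqcount_begin(&foo_seq);
 *	smp_rmb();	(pairs with the writer's smp_wmb())
 *	val = foo_data;
 *	...
 *
 * raw_read_seqcount_begin() below is exactly this pairing.
 */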

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);
	smp_rmb();
	return ret;
}
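
/*
 * Illustrative sketch (hypothetical names): because raw_read_seqcount()
 * neither masks nor retries on the LSB, calling code can inspect the
 * odd/even state itself, e.g. to detect an in-progress writer:
 *
 *	seq = raw_read_seqcount(&foo_seq);
 *	if (seq & 1)
 *		... a writer is active; fall back or retry later ...
 */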

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);

	seqcount_lockdep_reader_access(s);
	smp_rmb();
	return ret & ~1;
}
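
/*
 * Illustrative sketch (hypothetical names): raw_seqcount_begin() suits
 * callers that must not spin waiting for a writer; an in-progress write
 * simply shows up as a failed retry, and the caller can fall back:
 *
 *	seq = raw_seqcount_begin(&foo_seq);
 *	... speculatively read foo_data ...
 *	if (read_seqcount_retry(&foo_seq, seq))
 *		... fall back, e.g. to a locked slow path ...
 */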

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after the loads of the protected variables and before the
 * sequence is re-read here.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
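
/*
 * Illustrative sketch (hypothetical "foo_seq"/"foo_data"): the canonical
 * lockless read side built from the two halves above:
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		val = foo_data;
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */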

/*
 * raw_write_seqcount_begin/end: open/close a write section without any
 * lockdep checking. The caller must provide its own serialization
 * against other writers.
 */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;	/* odd: readers will retry */
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;	/* even again: readers may proceed */
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
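
/*
 * Illustrative sketch (hypothetical "foo_lock"/"foo_seq"): the writer
 * serializes against other writers with its own lock, as the comment
 * above requires; the seqcount only protects the readers:
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seq);
 *	foo_data = new_val;
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */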

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
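
/*
 * Illustrative sketch (hypothetical names): a writer that has modified
 * data without a surrounding begin/end pair can still force concurrent
 * readers to retry:
 *
 *	foo_data = new_val;
 *	write_seqcount_barrier(&foo_seq);
 */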

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
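
/*
 * Illustrative sketch: a seqlock can be defined statically or initialized
 * at run time (hypothetical "foo_lock" and "bar"):
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	seqlock_init(&bar->lock);
 */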

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
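
/*
 * Illustrative sketch (hypothetical names): a writer bumps the sequence
 * on both sides of the update, so the reader loop shown at the top of
 * this file retries if it raced with us:
 *
 *	write_seqlock(&foo_lock);
 *	foo.x = new_x;
 *	foo.y = new_y;
 *	write_sequnlock(&foo_lock);
 */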

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
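
/*
 * Illustrative sketch (hypothetical names): the irqsave variant for
 * writers that may run with interrupts either on or off:
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	... update foo ...
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */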

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
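
/*
 * Illustrative sketch (hypothetical names): a locking reader excludes
 * writers and other locking readers, so it never needs to retry:
 *
 *	read_seqlock_excl(&foo_lock);
 *	... foo is stable here ...
 *	read_sequnlock_excl(&foo_lock);
 */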

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a lockless reader (even) or a locking reader (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
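
/*
 * Illustrative sketch (hypothetical names): the full optimistic-then-
 * locking pattern built from the three helpers above; seq starts even
 * and is forced odd to take the lock on the second pass:
 *
 *	int seq = 0;
 *
 * again:
 *	read_seqbegin_or_lock(&foo_lock, &seq);
 *	... read foo ...
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	(odd: take the lock this time)
 *		goto again;
 *	}
 *	done_seqretry(&foo_lock, seq);
 */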

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */