/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
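
/*
 * Illustrative usage sketch (not part of the API; "foo_seq", "foo_x" and
 * "foo_y" are hypothetical names). The writer below is assumed to be
 * serialized externally and non-preemptible, as required above:
 *
 *	static seqcount_t foo_seq;	// seqcount_init(&foo_seq) at boot
 *	static int foo_x, foo_y;
 *
 *	// Reader: loop until a consistent snapshot is observed.
 *	unsigned seq;
 *	int x, y;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		x = foo_x;
 *		y = foo_y;
 *	} while (read_seqcount_retry(&foo_seq, seq));
 *
 *	// Writer: caller provides serialization and non-preemptibility.
 *	write_seqcount_begin(&foo_seq);
 *	foo_x++;
 *	foo_y++;
 *	write_seqcount_end(&foo_seq);
 */
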
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)						\
	do {								\
		static struct lock_class_key __key;			\
		__seqcount_init((s), #s, &__key);			\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

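/*
 * Illustrative static initialization sketch ("foo_seq" is a hypothetical
 * name):
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 */
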
/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used for any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. That can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks. See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
	do {								\
		seqcount_##lockname##_t *____s = (s);			\
		seqcount_init(&____s->seqcount);			\
		__SEQ_LOCK(____s->lock = (_lock));			\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex)

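/*
 * Illustrative runtime initialization sketch (hypothetical "struct foo"):
 * associate the spinlock used for writer serialization with the counter,
 * so lockdep can verify it is held in write side sections.
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 */
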
/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockmember:		argument for lockdep_assert_held()
 * @lockbase:		associated lock release function (prefix only)
 * @lock_acquire:	associated lock acquisition function (full call)
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname {					\
	seqcount_t		seqcount;				\
	__SEQ_LOCK(locktype	*lock);					\
} seqcount_##lockname##_t;						\
									\
static __always_inline seqcount_t *					\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline unsigned						\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{									\
	unsigned seq = READ_ONCE(s->seqcount.sequence);			\
									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return seq;						\
									\
	if (preemptible && unlikely(seq & 1)) {				\
		__SEQ_LOCK(lock_acquire);				\
		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
									\
		/*							\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.			\
		 */							\
		seq = READ_ONCE(s->seqcount.sequence);			\
	}								\
									\
	return seq;							\
}									\
									\
static __always_inline bool						\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return preemptible;					\
									\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
	return false;							\
}									\
									\
static __always_inline void						\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
{									\
	__SEQ_LOCK(lockdep_assert_held(lockmember));			\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return READ_ONCE(s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
SEQCOUNT_LOCKNAME(ww_mutex,     struct ww_mutex, true,     &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))

/*
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
	__SEQ_LOCK(.lock	= (assoc_lock))				\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)

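/*
 * Illustrative static initialization sketch (hypothetical names):
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 */
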
#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqprop_##prop((void *)(s)),		\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop),			\
	__seqprop_case((s),	ww_mutex,	prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)
#define seqprop_sequence(s)		__seqprop(s, sequence)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)
#define seqprop_assert(s)		__seqprop(s, assert)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)					\
({									\
	unsigned __seq;							\
									\
	while ((__seq = seqprop_sequence(s)) & 1)			\
		cpu_relax();						\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)					\
({									\
	unsigned _seq = __read_seqcount_begin(s);			\
									\
	smp_rmb();							\
	_seq;								\
})

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)						\
({									\
	seqcount_lockdep_reader_access(seqprop_ptr(s));			\
	raw_read_seqcount_begin(s);					\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)						\
({									\
	unsigned __seq = seqprop_sequence(s);				\
									\
	smp_rmb();							\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)						\
({									\
	/*								\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.					\
	 */								\
	raw_read_seqcount(s) & ~1;					\
})

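/*
 * Illustrative hot path sketch for raw_seqcount_begin() (hypothetical
 * names): if a writer was active at the begin, the cleared LSB makes the
 * closing check fail and the section is simply retried.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = raw_seqcount_begin(&foo_seq);
 *		// ... small read section ...
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */
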
/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)					\
	do___read_seqcount_retry(seqprop_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)					\
do {									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)					\
do {									\
	do_raw_write_seqcount_end(seqprop_ptr(s));			\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	do_raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)						\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)						\
do {									\
	do_write_seqcount_end(seqprop_ptr(s));				\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}

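/*
 * Illustrative write side sketch for an associated-lock seqcount
 * (hypothetical "struct foo" from the init sketch above): lockdep checks
 * that f->lock is held; on !PREEMPT_RT the spinlock already disables
 * preemption, and on PREEMPT_RT the read side compensates, so no manual
 * preempt_disable() is needed.
 *
 *	spin_lock(&f->lock);
 *	write_seqcount_begin(&f->seq);
 *	// ... update the protected data ...
 *	write_seqcount_end(&f->seq);
 *	spin_unlock(&f->lock);
 */
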
/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in a
 * seq-writer critical section that would ensure readers are aware of ongoing
 * writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *		int s;
 *
 *		do {
 *			s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)					\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)					\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {					\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
static inline void seqcount_latch_init(seqcount_latch_t *s)
{
	seqcount_init(&s->seqcount);
}

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with read_seqcount_latch_retry().
 */
static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	return read_seqcount_retry(&s->seqcount, start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state, the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// This includes needed smp_rmb()
 *		} while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 * The non-requirement for atomic modifications does _NOT_ include
 * the publishing of new entries in the case where data is a dynamic
 * data structure.
 *
 * An iteration might start in data[0] and get suspended long enough
 * to miss an entire modification sequence; once it resumes, it might
 * observe the new entry.
 *
 * NOTE2:
 *
 * When data is a dynamic data structure, one should use regular RCU
 * patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	/*
	 * Make sure that readers don't starve writers on PREEMPT_RT: use
	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
	 */
	seqcount_spinlock_t seqcount;
	spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)					\
	{								\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)						\
	do {								\
		spin_lock_init(&(sl)->lock);				\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
	seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times
	 * when completing the read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}

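/*
 * Illustrative lockless reader sketch (hypothetical "foo_seqlock" and
 * "foo_state"):
 *
 *	unsigned seq;
 *	struct foo_state snapshot;
 *
 *	do {
 *		seq = read_seqbegin(&foo_seqlock);
 *		snapshot = foo_state;
 *	} while (read_seqretry(&foo_seqlock, seq));
 */
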
/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of the generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}

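/*
 * Illustrative write side sketch (hypothetical names): the embedded
 * spinlock provides both serialization and non-preemptibility, so a bare
 * write_seqlock()/write_sequnlock() pair suffices.
 *
 *	write_seqlock(&foo_seqlock);
 *	foo_state = new_state;
 *	write_sequnlock(&foo_seqlock);
 */
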
/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid starvation of seqlock_t lockless readers
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq:  sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq:  count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

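/*
 * Illustrative "lockless first, locking on retry" sketch (hypothetical
 * names): the marker starts even so the first pass is lockless; if that
 * pass fails validation, it is forced odd so the next pass takes the
 * lock. See Documentation/locking/seqlock.rst for the template.
 *
 *	int seq = 0;
 *
 * restart:
 *	read_seqbegin_or_lock(&foo_seqlock, &seq);
 *	snapshot = foo_state;
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;
 *		goto restart;
 *	}
 *	done_seqretry(&foo_seqlock, seq);
 */
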
/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must have been opened with read_seqbegin_or_lock_irqsave(), and
 * validated by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */