// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * "Sequence" lock primitive
 *
 * Copyright (C) 2015 David Lamparter <equinox@diac24.net>
 */

#ifndef _SEQLOCK_H
#define _SEQLOCK_H

#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>
#include <assert.h>		/* for seqlock_assert_valid() */
#include "frratomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This locking primitive is intended for use in a 1:N setup.
 *
 * - one "counter" seqlock issuing increasing numbers
 * - multiple seqlock users hold references on these numbers
 *
 * This is intended for implementing RCU reference-holding.  There is one
 * global counter, with threads locking a seqlock whenever they take a
 * reference.  A seqlock can also be idle/unlocked.
 *
 * The "counter" seqlock will always stay locked; the RCU cleanup thread
 * continuously counts it up, waiting for threads to release or progress to a
 * sequence number further ahead.  If all threads are > N, references dropped
 * in N can be freed.
 *
 * Generally, the locking sequence looks like this:
 *
 * Thread-A                     Thread-B
 *
 * seqlock_acquire(a)
 *    | running                 seqlock_wait(b)      -- a <= b
 * seqlock_release()               | blocked
 * OR: seqlock_acquire(a')         |                 -- a' > b
 *                              (resumes)
 */

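/* Illustrative sketch of the diagram above.  The seqlock "a_lock" and the
 * literal position values are hypothetical (only chosen to be valid, i.e.
 * bit 0 set) and are not part of this API:
 *
 *	struct seqlock a_lock;			// owned by Thread-A
 *	seqlock_init(&a_lock);
 *
 *	// Thread-A: hold position 5, later either release or move ahead
 *	seqlock_acquire_val(&a_lock, 5);
 *	...
 *	seqlock_release(&a_lock);		// or: seqlock_acquire_val(&a_lock, 9)
 *
 *	// Thread-B: blocks while a_lock is held at a position <= 5,
 *	// resumes once Thread-A releases or moves past 5
 *	seqlock_wait(&a_lock, 5);
 */
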
/* use sequentially increasing "ticket numbers".  The lowest bit will always
 * be 1 to have a 'cleared' indication (i.e., counts are 1, 5, 9, 13, etc.).
 * The 2nd lowest bit is used to indicate we have waiters.
 */
typedef _Atomic uint32_t	seqlock_ctr_t;
typedef uint32_t		seqlock_val_t;
#define seqlock_assert_valid(val) assert((val) & SEQLOCK_HELD)

/* NB: SEQLOCK_WAITERS is only allowed if SEQLOCK_HELD is also set; can't
 * have waiters on an unheld seqlock
 */
#define SEQLOCK_HELD		(1U << 0)
#define SEQLOCK_WAITERS		(1U << 1)
#define SEQLOCK_VAL(n)		((n) & ~SEQLOCK_WAITERS)
#define SEQLOCK_STARTVAL	1U
#define SEQLOCK_INCR		4U

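/* Worked example of the encoding: positions start at SEQLOCK_STARTVAL and
 * advance by SEQLOCK_INCR per bump, i.e. 1, 5, 9, 13, ... so SEQLOCK_HELD
 * (bit 0) is set in every valid position and 0 means "not held".  A raw
 * pos of 7 (0b111) is position 5 with SEQLOCK_WAITERS set;
 * SEQLOCK_VAL(7) == 5 strips the waiters bit back off.
 */
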
/* TODO: originally, this was using "atomic_fetch_add", which is the reason
 * bit 0 is used to indicate held state.  With SEQLOCK_WAITERS added, there's
 * no fetch_add anymore (cmpxchg loop instead), so we don't need to use bit 0
 * for this anymore & can just special-case the value 0 for it and skip it in
 * counting.
 */

struct seqlock {
	/* always used */
	seqlock_ctr_t pos;
	/* used when futexes are not available (i.e., non-Linux) */
	pthread_mutex_t lock;
	pthread_cond_t wake;
};


/* sqlo = 0 - init state: not held */
extern void seqlock_init(struct seqlock *sqlo);


/* basically: "while (sqlo <= val) wait();"
 * returns when sqlo > val || !seqlock_held(sqlo)
 */
extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);

/* same, but time-limited (limit is an absolute CLOCK_MONOTONIC value) */
extern bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
			      const struct timespec *abs_monotime_limit);

/* one-shot test, returns true if seqlock_wait would return immediately */
extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);

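/* Usage sketch for the wait variants.  The seqlocks "counter" and "other"
 * are hypothetical, <time.h> (for clock_gettime()) is assumed to be included
 * by the caller, and seqlock_timedwait() is assumed to return false when the
 * time limit was hit:
 *
 *	seqlock_val_t val = seqlock_cur(&counter);
 *
 *	if (!seqlock_check(&other, val)) {	// would seqlock_wait() block?
 *		struct timespec limit;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &limit);
 *		limit.tv_sec += 1;		// give up after ~1 second
 *		if (!seqlock_timedwait(&other, val, &limit))
 *			;	// timed out; "other" is still at <= val
 *	}
 */
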
/* true if sqlo currently holds a position (i.e., pos != 0) */
static inline bool seqlock_held(struct seqlock *sqlo)
{
	return !!atomic_load_explicit(&sqlo->pos, memory_order_relaxed);
}

/* sqlo - get seqlock position -- for the "counter" seqlock */
extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);

/* ++sqlo (but atomic & wakes waiters) - returns the value we bumped to.
 *
 * guarantees:
 * - each seqlock_bump call bumps the position by exactly one SEQLOCK_INCR.
 *   There are no skipped/missed or multiple increments.
 * - each return value is only returned from one seqlock_bump() call
 */
extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);

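/* Sketch of the "counter"-side pattern described at the top of this file;
 * "counter" and "reader" are hypothetical seqlocks, not part of this header:
 *
 *	seqlock_val_t prev = seqlock_cur(&counter);
 *
 *	seqlock_bump(&counter);		// move the global position forward
 *	seqlock_wait(&reader, prev);	// until the reader is > prev or idle
 *	// references dropped at "prev" can now be freed
 */
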

/* sqlo = val - can be used on a held seqlock. */
extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);

/* sqlo = ref - standard pattern: acquire relative to other seqlock */
static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
{
	seqlock_acquire_val(sqlo, seqlock_cur(ref));
}

/* sqlo = 0 - set seqlock position to 0, marking as non-held */
extern void seqlock_release(struct seqlock *sqlo);
/* release should normally be followed by a bump on the "counter", if
 * anything other than reading RCU items was done
 */

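/* Sketch of the reader-side pattern; "counter" and "my_lock" are hypothetical
 * global/per-thread seqlocks, not part of this header:
 *
 *	seqlock_acquire(&my_lock, &counter);	// pin the current position
 *	...read (and possibly drop) RCU-protected items...
 *	seqlock_release(&my_lock);
 *	if (did_more_than_read)			// hypothetical condition
 *		seqlock_bump(&counter);
 */
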
#ifdef __cplusplus
}
#endif

#endif /* _SEQLOCK_H */