/*
 * "Sequence" lock primitive
 *
 * Copyright (C) 2015  David Lamparter <equinox@diac24.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301 USA
 */

#ifndef _SEQLOCK_H
#define _SEQLOCK_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>
#include "frratomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This locking primitive is intended for use in a 1:N setup.
 *
 * - one "counter" seqlock issuing increasing numbers
 * - multiple seqlock users hold references on these numbers
 *
 * This is intended for implementing RCU reference-holding.  There is one
 * global counter, with threads locking a seqlock whenever they take a
 * reference.  A seqlock can also be idle/unlocked.
 *
 * The "counter" seqlock will always stay locked;  the RCU cleanup thread
 * continuously counts it up, waiting for threads to release or progress to
 * a sequence number further ahead.  If all threads are > N, references
 * dropped in N can be freed.
 *
 * Generally, the locking sequence looks like this:
 *
 *   Thread-A                Thread-B
 *
 *   seqlock_acquire(a)
 *      | running            seqlock_wait(b)   -- a <= b
 *   seqlock_release()          | blocked
 *   OR: seqlock_acquire(a')    |              -- a' > b
 *                           (resumes)
 */
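
/* Rough usage sketch of the pattern above (illustrative only; "counter",
 * "my_ref", "n" and item_free() are hypothetical names, not part of this
 * API):
 *
 *	struct seqlock counter, my_ref;
 *
 *	seqlock_init(&counter);
 *	seqlock_init(&my_ref);
 *	seqlock_acquire_val(&counter, SEQLOCK_STARTVAL);
 *
 *	// reader thread: hold a reference while accessing shared items
 *	seqlock_acquire(&my_ref, &counter);
 *	// ... read RCU'd data ...
 *	seqlock_release(&my_ref);
 *
 *	// cleanup thread: block until the reader has released or moved
 *	// past position n, then free items dropped at n
 *	seqlock_wait(&my_ref, n);
 *	item_free(...);
 */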

/* Use sequentially increasing "ticket numbers".  The lowest bit will always
 * be 1 to have a 'cleared' indication (i.e., counts 1, 5, 9, 13, etc.).
 * The 2nd lowest bit is used to indicate we have waiters.
 */
typedef _Atomic uint32_t	seqlock_ctr_t;
typedef uint32_t		seqlock_val_t;
#define seqlock_assert_valid(val) assert((val) & SEQLOCK_HELD)

/* NB: SEQLOCK_WAITERS is only allowed if SEQLOCK_HELD is also set; can't
 * have waiters on an unheld seqlock
 */
#define SEQLOCK_HELD		(1U << 0)
#define SEQLOCK_WAITERS		(1U << 1)
#define SEQLOCK_VAL(n)		((n) & ~SEQLOCK_WAITERS)
#define SEQLOCK_STARTVAL	1U
#define SEQLOCK_INCR		4U

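/* Worked example of the encoding (values derived from the defines above):
 * a seqlock held at position 5 stores the raw value 5 (SEQLOCK_HELD is
 * bit 0, set in every valid position);  with a waiter present, the raw
 * value becomes 7 (5 | SEQLOCK_WAITERS), and SEQLOCK_VAL(7) == 5.
 * Positions advance 1, 5, 9, 13, ... in SEQLOCK_INCR steps.
 */
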
/* TODO: originally, this was using "atomic_fetch_add", which is the reason
 * bit 0 is used to indicate held state.  With SEQLOCK_WAITERS added, there's
 * no fetch_add anymore (cmpxchg loop instead), so we don't need to use bit 0
 * for this anymore & can just special-case the value 0 for it and skip it in
 * counting.
 */

struct seqlock {
	/* always used */
	seqlock_ctr_t pos;
	/* used when futexes are not available (i.e., non-Linux) */
	pthread_mutex_t lock;
	pthread_cond_t wake;
};


/* sqlo = 0 - init state: not held */
extern void seqlock_init(struct seqlock *sqlo);


/* basically: "while (sqlo <= val) wait();"
 * returns when sqlo > val || !seqlock_held(sqlo)
 */
extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);

/* same, but time-limited (limit is an absolute CLOCK_MONOTONIC value) */
extern bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
			      const struct timespec *abs_monotime_limit);
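
/* Sketch of a bounded wait (illustrative; the 50ms budget is arbitrary,
 * and the assumption that a false return means the limit was hit is not
 * guaranteed by this header):
 *
 *	struct timespec limit;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &limit);
 *	limit.tv_nsec += 50 * 1000 * 1000;
 *	if (limit.tv_nsec >= 1000000000L) {
 *		limit.tv_sec++;
 *		limit.tv_nsec -= 1000000000L;
 *	}
 *	if (!seqlock_timedwait(&my_ref, val, &limit))
 *		;	// presumably timed out; my_ref may still be <= val
 */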

/* one-shot test, returns true if seqlock_wait would return immediately */
extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);

static inline bool seqlock_held(struct seqlock *sqlo)
{
	return !!atomic_load_explicit(&sqlo->pos, memory_order_relaxed);
}

/* sqlo - get seqlock position -- for the "counter" seqlock */
extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);

/* ++sqlo (but atomic & wakes waiters) - returns the value we bumped to.
 *
 * guarantees:
 * - each seqlock_bump call bumps the position by exactly one SEQLOCK_INCR.
 *   There are no skipped/missed or multiple increments.
 * - each return value is only returned from one seqlock_bump() call
 */
extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);
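
/* e.g. (sketch; "counter" is a hypothetical counter seqlock sitting at
 * position 1): successive bumps return 5, 9, 13, ..., each value handed
 * out exactly once, even with concurrent callers:
 *
 *	seqlock_val_t t1 = seqlock_bump(&counter);	// 5
 *	seqlock_val_t t2 = seqlock_bump(&counter);	// 9
 */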


/* sqlo = val - can be used on held seqlock. */
extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);

/* sqlo = ref - standard pattern: acquire relative to other seqlock */
static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
{
	seqlock_acquire_val(sqlo, seqlock_cur(ref));
}

/* sqlo = 0 - set seqlock position to 0, marking as non-held */
extern void seqlock_release(struct seqlock *sqlo);
/* release should normally be followed by a bump on the "counter", if
 * anything other than reading RCU items was done
 */
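
/* e.g. (sketch; "counter" and "my_ref" are hypothetical, as above):
 *
 *	seqlock_acquire(&my_ref, &counter);
 *	// ... modify shared data, queue replaced items for freeing ...
 *	seqlock_release(&my_ref);
 *	seqlock_bump(&counter);	// let the cleanup thread make progress
 */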

#ifdef __cplusplus
}
#endif

#endif /* _SEQLOCK_H */