// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2017-19 David Lamparter, for NetDEF, Inc.
 */

#ifndef _FRRCU_H
#define _FRRCU_H

#include <assert.h>

#include "memory.h"
#include "atomlist.h"

#ifdef __cplusplus
extern "C" {
#endif
17 | ||
/* quick RCU primer:
 * There's a global sequence counter.  Whenever a thread does a
 * rcu_read_lock(), it is marked as holding the current sequence counter.
 * When something is cleaned with RCU, the global sequence counter is
 * increased and the item is queued for cleanup - *after* all threads are
 * at a more recent sequence counter (or no sequence counter / unheld).
 *
 * So, by delaying resource cleanup, RCU ensures that things don't go away
 * while another thread may hold a (stale) reference.
 *
 * Note that even if a thread is in rcu_read_lock(), it is invalid for that
 * thread to access bits after rcu_free() & co. on them.  This is a design
 * choice to allow no-op'ing out the entire RCU mechanism if we're running
 * singlethreaded.  (Also allows some optimization on the counter bumping.)
 *
 * differences from Linux Kernel RCU:
 * - there's no rcu_synchronize(); if you really need to defer something,
 *   use rcu_call() (and double-check it's really necessary)
 * - rcu_dereference() and rcu_assign_pointer() don't exist; use atomic_*
 *   instead (ATOM* list structures do the right thing)
 */
39 | ||
/* opaque */
struct rcu_thread;

/* called before new thread creation, sets up rcu thread info for the new
 * thread before it actually starts.  This ensures possible RCU references
 * are held for thread startup.
 *
 * return value must be passed into the new thread's call to rcu_thread_start()
 */
extern struct rcu_thread *rcu_thread_prepare(void);

/* cleanup in case pthread_create() fails */
extern void rcu_thread_unprepare(struct rcu_thread *rcu_thread);

/* called early in the new thread, with the return value from the above.
 * NB: the new thread is initially in RCU-held state! (at depth 1)
 *
 * TBD: maybe inherit RCU state from rcu_thread_prepare()?
 */
extern void rcu_thread_start(struct rcu_thread *rcu_thread);

/* thread exit is handled through pthread_key_create's destructor function */

/* global RCU shutdown - must be called with only 1 active thread left.  waits
 * until remaining RCU actions are done & the RCU thread has exited.
 *
 * This is mostly here to get a clean exit without memleaks.
 */
extern void rcu_shutdown(void);

/* enter / exit RCU-held state.  counter-based, so can be called nested. */
extern void rcu_read_lock(void);
extern void rcu_read_unlock(void);

/* for debugging / safety checks */
extern void rcu_assert_read_locked(void);
extern void rcu_assert_read_unlocked(void);
77 | ||
/* discriminator for struct rcu_action - selects which cleanup operation the
 * RCU sweeper performs once all threads have passed the item's sequence
 * counter.
 */
enum rcu_action_type {
	RCUA_INVALID = 0,
	/* used internally by the RCU code, shouldn't ever show up outside */
	RCUA_NEXT,	/* NOTE(review): internal queue marker - semantics not
			 * visible in this header; see frrcu.c */
	RCUA_END,
	/* normal RCU actions, for outside use */
	RCUA_FREE,	/* free an object (see rcu_free() below) */
	RCUA_CLOSE,	/* close an fd (see rcu_close() below) */
	RCUA_CALL,	/* invoke a callback (see rcu_call() below) */
};
88 | ||
/* since rcu_head is intended to be embedded into structs which may exist
 * with lots of copies, rcu_head is shrunk down to its absolute minimum -
 * the atomlist pointer + a pointer to this action struct.
 */
struct rcu_action {
	enum rcu_action_type type;

	union {
		struct {
			/* memtype to free against; NULL means plain free()
			 * (see rcu_free() comment below) */
			struct memtype *mt;
			/* offsetof() the rcu_head within the containing
			 * struct, so the object pointer can be recovered
			 * from the embedded head */
			ptrdiff_t offset;
		} free;

		struct {
			/* callback; runs on (and blocks) the RCU thread */
			void (*fptr)(void *arg);
			/* offsetof() the rcu_head within the containing
			 * struct, as above */
			ptrdiff_t offset;
		} call;
	} u;
};
108 | ||
/* RCU cleanup function queue item - embed this in any struct handed to
 * rcu_free() / rcu_call().
 */
PREDECL_ATOMLIST(rcu_heads);
struct rcu_head {
	/* atomlist linkage onto the pending-cleanup queue */
	struct rcu_heads_item head;
	/* points at a static struct rcu_action describing what to do;
	 * set up by the rcu_free()/rcu_call() macros */
	const struct rcu_action *action;
};
115 | ||
/* special RCU head for delayed fd-close (used with rcu_close() below) */
struct rcu_head_close {
	struct rcu_head rcu_head;
	/* fd to close() once all threads have passed into RCU-released
	 * state */
	int fd;
};
121 | ||
/* enqueue RCU action - use the macros below to get the rcu_action set up */
extern void rcu_enqueue(struct rcu_head *head, const struct rcu_action *action);

/* RCU free() and file close() operations.
 *
 * freed memory / closed fds become _immediately_ unavailable to the calling
 * thread, but will remain available for other threads until they have passed
 * into RCU-released state.
 */
131 | ||
/* may be called with NULL mt to do non-MTYPE free()
 *
 * ptr may be NULL, in which case this is a no-op.  The rcu_action is a
 * use-site-local static (one instance per expansion, shared across calls);
 * its offset field lets the sweeper recover the object pointer from the
 * embedded rcu_head named by "field".
 */
#define rcu_free(mtype, ptr, field)                                            \
	do {                                                                   \
		typeof(ptr) _ptr = (ptr);                                      \
		if (!_ptr)                                                     \
			break;                                                 \
		struct rcu_head *_rcu_head = &_ptr->field;                     \
		static const struct rcu_action _rcu_action = {                 \
			.type = RCUA_FREE,                                     \
			.u.free = {                                            \
				.mt = mtype,                                   \
				.offset = offsetof(typeof(*_ptr), field),      \
			},                                                     \
		};                                                             \
		rcu_enqueue(_rcu_head, &_rcu_action);                          \
	} while (0)
148 | ||
/* use this sparingly, it runs on (and blocks) the RCU thread
 *
 * func is invoked with a pointer to the containing object (recovered from
 * the rcu_head named by "field" via the stored offset).  Unlike rcu_free(),
 * ptr must not be NULL.
 */
#define rcu_call(func, ptr, field)                                             \
	do {                                                                   \
		typeof(ptr) _ptr = (ptr);                                      \
		/* compile-time type check only: the dead assignment below     \
		 * errors out if func doesn't accept typeof(ptr)               \
		 */                                                            \
		void (*_fptype)(typeof(ptr));                                  \
		struct rcu_head *_rcu_head = &_ptr->field;                     \
		static const struct rcu_action _rcu_action = {                 \
			.type = RCUA_CALL,                                     \
			.u.call = {                                            \
				.fptr = (void *)func,                          \
				.offset = offsetof(typeof(*_ptr), field),      \
			},                                                     \
		};                                                             \
		(void)(_fptype = func);                                        \
		rcu_enqueue(_rcu_head, &_rcu_action);                          \
	} while (0)
165 | ||
extern void rcu_close(struct rcu_head_close *head, int fd);

#ifdef __cplusplus
}
#endif

#endif /* _FRRCU_H */