/*
 * Copyright (c) 2017-19  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRRCU_H
#define _FRRCU_H

#include <assert.h>

#include "memory.h"
#include "atomlist.h"

#ifdef __cplusplus
extern "C" {
#endif

/* quick RCU primer:
 * There's a global sequence counter.  Whenever a thread does a
 * rcu_read_lock(), it is marked as holding the current sequence counter.
 * When something is cleaned with RCU, the global sequence counter is
 * increased and the item is queued for cleanup - *after* all threads are
 * at a more recent sequence counter (or hold no sequence counter at all).
 *
 * So, by delaying resource cleanup, RCU ensures that things don't go away
 * while another thread may hold a (stale) reference.
 *
 * Note that even if a thread is in rcu_read_lock(), it is invalid for that
 * thread to access an object after calling rcu_free() & co. on it.  This is
 * a design choice to allow no-op'ing out the entire RCU mechanism if we're
 * running singlethreaded.  (It also allows some optimization on the counter
 * bumping.)
 *
 * differences from Linux Kernel RCU:
 * - there's no synchronize_rcu(); if you really need to defer something,
 *   use rcu_call() (and double-check it's really necessary)
 * - rcu_dereference() and rcu_assign_pointer() don't exist; use atomic_*
 *   instead (the ATOM* list structures do the right thing)
 */
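
/* Illustrative reader-side sketch (not part of this header's API).  The
 * names "global_cfg", "struct cfg" and do_something() are made up; the
 * pattern is simply to bracket accesses with rcu_read_lock() /
 * rcu_read_unlock() and to load the shared pointer with atomic_*, as noted
 * above:
 *
 *	struct cfg *cfg;
 *
 *	rcu_read_lock();
 *	cfg = atomic_load_explicit(&global_cfg, memory_order_acquire);
 *	if (cfg)
 *		do_something(cfg);
 *	rcu_read_unlock();
 *
 * cfg remains safe to dereference until rcu_read_unlock() - unless this
 * same thread called rcu_free() on it, per the note above.
 */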

/* opaque */
struct rcu_thread;

/* called before new thread creation, sets up rcu thread info for the new
 * thread before it actually exists.  This ensures possible RCU references
 * are held for thread startup.
 *
 * return value must be passed into the new thread's call to rcu_thread_start()
 */
extern struct rcu_thread *rcu_thread_prepare(void);

/* cleanup in case pthread_create() fails */
extern void rcu_thread_unprepare(struct rcu_thread *rcu_thread);

/* called early in the new thread, with the return value from the above.
 * NB: new thread is initially in RCU-held state! (at depth 1)
 *
 * TBD: maybe inherit RCU state from rcu_thread_prepare()?
 */
extern void rcu_thread_start(struct rcu_thread *rcu_thread);

/* thread exit is handled through pthread_key_create's destructor function */

/* global RCU shutdown - must be called with only 1 active thread left.  Waits
 * until remaining RCU actions are done & the RCU thread has exited.
 *
 * This is mostly here to get a clean exit without memleaks.
 */
extern void rcu_shutdown(void);
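
/* Illustrative thread-startup sketch (hypothetical worker_fn / tid names,
 * error handling abbreviated).  rcu_thread_prepare() runs in the parent, its
 * return value is handed to the new thread, which must call
 * rcu_thread_start() early on:
 *
 *	// parent thread
 *	struct rcu_thread *rt = rcu_thread_prepare();
 *	if (pthread_create(&tid, NULL, worker_fn, rt))
 *		rcu_thread_unprepare(rt);
 *
 *	// in worker_fn(arg)
 *	rcu_thread_start(arg);
 *	// the new thread starts RCU-held at depth 1 (see above); drop that
 *	// hold with rcu_read_unlock() once startup references are done with
 */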

/* enter / exit RCU-held state.  counter-based, so can be called nested. */
extern void rcu_read_lock(void);
extern void rcu_read_unlock(void);

/* for debugging / safety checks */
extern void rcu_assert_read_locked(void);
extern void rcu_assert_read_unlocked(void);

enum rcu_action_type {
	RCUA_INVALID = 0,
	/* used internally by the RCU code, shouldn't ever show up outside */
	RCUA_NEXT,
	RCUA_END,
	/* normal RCU actions, for outside use */
	RCUA_FREE,
	RCUA_CLOSE,
	RCUA_CALL,
};

/* since rcu_head is intended to be embedded into structs which may exist
 * with lots of copies, rcu_head is shrunk down to its absolute minimum -
 * the atomlist pointer + a pointer to this action struct.
 */
struct rcu_action {
	enum rcu_action_type type;

	union {
		struct {
			struct memtype *mt;
			ptrdiff_t offset;
		} free;

		struct {
			void (*fptr)(void *arg);
			ptrdiff_t offset;
		} call;
	} u;
};

/* RCU cleanup function queue item */
PREDECL_ATOMLIST(rcu_heads);
struct rcu_head {
	struct rcu_heads_item head;
	const struct rcu_action *action;
};

/* special RCU head for delayed fd-close */
struct rcu_head_close {
	struct rcu_head rcu_head;
	int fd;
};

/* enqueue RCU action - use the macros below to get the rcu_action set up */
extern void rcu_enqueue(struct rcu_head *head, const struct rcu_action *action);

/* RCU free() and file close() operations.
 *
 * freed memory / closed fds become _immediately_ unavailable to the calling
 * thread, but will remain available for other threads until they have passed
 * into RCU-released state.
 */

/* may be called with NULL mt to do non-MTYPE free() */
#define rcu_free(mtype, ptr, field)                                            \
	do {                                                                   \
		typeof(ptr) _ptr = (ptr);                                      \
		if (!_ptr)                                                     \
			break;                                                 \
		struct rcu_head *_rcu_head = &_ptr->field;                     \
		static const struct rcu_action _rcu_action = {                 \
			.type = RCUA_FREE,                                     \
			.u.free = {                                            \
				.mt = mtype,                                   \
				.offset = offsetof(typeof(*_ptr), field),      \
			},                                                     \
		};                                                             \
		rcu_enqueue(_rcu_head, &_rcu_action);                          \
	} while (0)

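/* Illustrative use of rcu_free() (hypothetical "struct item" / MTYPE_ITEM;
 * the only requirement is an embedded struct rcu_head, named as the third
 * argument):
 *
 *	struct item {
 *		struct rcu_head rcu_head;
 *		int value;
 *	};
 *
 *	// instead of XFREE(MTYPE_ITEM, item):
 *	rcu_free(MTYPE_ITEM, item, rcu_head);
 *
 * The calling thread must treat "item" as gone immediately; other threads
 * currently in rcu_read_lock() may still use it until they are released.
 */
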
/* use this sparingly, it runs on (and blocks) the RCU thread */
#define rcu_call(func, ptr, field)                                             \
	do {                                                                   \
		typeof(ptr) _ptr = (ptr);                                      \
		void (*_fptype)(typeof(ptr));                                  \
		struct rcu_head *_rcu_head = &_ptr->field;                     \
		static const struct rcu_action _rcu_action = {                 \
			.type = RCUA_CALL,                                     \
			.u.call = {                                            \
				.fptr = (void *)func,                          \
				.offset = offsetof(typeof(*_ptr), field),      \
			},                                                     \
		};                                                             \
		(void)(_fptype = func);                                        \
		rcu_enqueue(_rcu_head, &_rcu_action);                          \
	} while (0)

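/* Illustrative use of rcu_call() (hypothetical item_cleanup() / "struct
 * item").  The callback receives a pointer to the containing struct,
 * recovered via the recorded offset of the rcu_head field; the
 * (void)(_fptype = func) line in the macro only exists to type-check that
 * signature at compile time.  For a plain free, prefer rcu_free().
 *
 *	static void item_cleanup(struct item *item)
 *	{
 *		// further teardown could go here; it runs on the RCU thread
 *		XFREE(MTYPE_ITEM, item);
 *	}
 *
 *	rcu_call(item_cleanup, item, rcu_head);
 */
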
extern void rcu_close(struct rcu_head_close *head, int fd);
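
/* Illustrative use of rcu_close() (hypothetical "struct connection" / conn):
 *
 *	struct connection {
 *		struct rcu_head_close rcu_close;
 *		int fd;
 *	};
 *
 *	rcu_close(&conn->rcu_close, conn->fd);
 *
 * As with rcu_free(), the fd must not be used by the calling thread after
 * this; the actual close() happens once all threads have moved on.
 */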

#ifdef __cplusplus
}
#endif

#endif /* _FRRCU_H */